package flux_test
import (
"context"
"errors"
"os"
"path"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/git"
gitFactory "github.com/aws/eks-anywhere/pkg/git/factory"
gitMocks "github.com/aws/eks-anywhere/pkg/git/mocks"
"github.com/aws/eks-anywhere/pkg/gitops/flux"
fluxMocks "github.com/aws/eks-anywhere/pkg/gitops/flux/mocks"
"github.com/aws/eks-anywhere/pkg/providers"
mocksprovider "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
defaultKustomizationManifestFileName = "kustomization.yaml"
defaultEksaClusterConfigFileName = "eksa-cluster.yaml"
defaultFluxPatchesFileName = "gotk-patches.yaml"
defaultFluxSyncFileName = "gotk-sync.yaml"
)
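// fluxTest bundles the mocks, temporary file writer, and cluster spec shared
// by the Flux GitOps tests; embedding *WithT and *testing.T exposes Gomega
// matchers and the standard testing helpers directly on the fixture.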
type fluxTest struct {
*WithT
*testing.T
ctx context.Context
flux *fluxMocks.MockGitOpsFluxClient
git *fluxMocks.MockGitClient
provider *mocksprovider.MockProvider
gitOpsFlux *flux.Flux
writer filewriter.FileWriter
clusterSpec *cluster.Spec
}
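// newFluxTest wires gomock-backed GitOps, git, and provider clients into a
// flux.Flux that writes to a per-test temporary directory.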
func newFluxTest(t *testing.T) fluxTest {
mockCtrl := gomock.NewController(t)
mockGitOpsFlux := fluxMocks.NewMockGitOpsFluxClient(mockCtrl)
mockGit := fluxMocks.NewMockGitClient(mockCtrl)
mockProvider := mocksprovider.NewMockProvider(gomock.NewController(t))
_, w := test.NewWriter(t)
f := flux.NewFluxFromGitOpsFluxClient(mockGitOpsFlux, mockGit, w, nil)
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = clusterConfig
})
return fluxTest{
T: t,
WithT: NewWithT(t),
ctx: context.Background(),
gitOpsFlux: f,
flux: mockGitOpsFlux,
git: mockGit,
provider: mockProvider,
writer: w,
clusterSpec: clusterSpec,
}
}
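// setupFlux attaches a GitHub-backed FluxConfig to the test cluster spec,
// applies the config defaults, and returns the owner, repository, and cluster
// config path used by the validation tests.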
func (t *fluxTest) setupFlux() (owner, repo, path string) {
t.Helper()
path = "fluxFolder"
owner = "aws"
repo = "eksa-gitops"
t.clusterSpec.FluxConfig = &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Branch: "main",
Github: &v1alpha1.GithubProviderConfig{
Owner: owner,
Repository: repo,
},
},
}
if err := cluster.SetConfigDefaults(t.clusterSpec.Config); err != nil {
t.Fatal(err)
}
return owner, repo, path
}
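// runValidations executes each validation in order and returns the first
// error encountered, if any.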
func runValidations(validations []validations.Validation) error {
for _, v := range validations {
if err := v().Err; err != nil {
return err
}
}
return nil
}
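// datacenterConfig returns a minimal VSphereDatacenterConfig passed to
// InstallGitOps as the provider datacenter object in these tests.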
func datacenterConfig(clusterName string) *v1alpha1.VSphereDatacenterConfig {
return &v1alpha1.VSphereDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.VSphereDatacenterKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Datacenter: "SDDC-Datacenter",
},
}
}
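// machineConfig returns the minimal VSphereMachineConfig counterpart used in
// the same InstallGitOps scenarios.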
func machineConfig(clusterName string) *v1alpha1.VSphereMachineConfig {
return &v1alpha1.VSphereMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Template: "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6",
},
}
}
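// newClusterSpec builds a cluster spec whose GitOpsRef points at a GitHub
// FluxConfig on branch "testBranch" under the given flux path, with config
// defaults applied.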
func newClusterSpec(t *testing.T, clusterConfig *v1alpha1.Cluster, fluxPath string) *cluster.Spec {
t.Helper()
fluxConfig := v1alpha1.FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.FluxConfigKind,
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-gitops",
Namespace: "default",
},
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
ClusterConfigPath: fluxPath,
Branch: "testBranch",
Github: &v1alpha1.GithubProviderConfig{
Owner: "mFolwer",
Repository: "testRepo",
Personal: true,
},
},
}
clusterConfig.Spec.GitOpsRef = &v1alpha1.Ref{Kind: v1alpha1.FluxConfigKind, Name: "test-gitops"}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = clusterConfig
s.VersionsBundle.Flux = fluxBundle()
s.FluxConfig = &fluxConfig
})
if err := cluster.SetConfigDefaults(clusterSpec.Config); err != nil {
t.Fatal(err)
}
return clusterSpec
}
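// fluxBundle pins the Flux controller images referenced when generating the
// flux-system manifests in these tests.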
func fluxBundle() releasev1alpha1.FluxBundle {
return releasev1alpha1.FluxBundle{
SourceController: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/fluxcd/source-controller:v0.12.1-8539f509df046a4f567d2182dde824b957136599",
},
KustomizeController: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/fluxcd/kustomize-controller:v0.11.1-d82011942ec8a447ba89a70ff9a84bf7b9579492",
},
HelmController: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/fluxcd/helm-controller:v0.10.0-d82011942ec8a447ba89a70ff9a84bf7b9579492",
},
NotificationController: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/fluxcd/notification-controller:v0.13.0-d82011942ec8a447ba89a70ff9a84bf7b9579492",
},
}
}
func TestInstallGitOpsOnManagementClusterWithPrexistingRepo(t *testing.T) {
tests := []struct {
testName string
clusterName string
managedbyClusterName string
selfManaged bool
fluxpath string
expectedClusterConfigGitPath string
expectedEksaSystemDirPath string
expectedEksaConfigFileName string
expectedKustomizationFileName string
expectedConfigFileContents string
expectedFluxSystemDirPath string
expectedFluxPatchesFileName string
expectedFluxSyncFileName string
}{
{
testName: "with default config path",
clusterName: "management-cluster",
selfManaged: true,
fluxpath: "",
expectedClusterConfigGitPath: "clusters/management-cluster",
expectedEksaSystemDirPath: "clusters/management-cluster/management-cluster/eksa-system",
expectedEksaConfigFileName: defaultEksaClusterConfigFileName,
expectedKustomizationFileName: defaultKustomizationManifestFileName,
expectedConfigFileContents: "./testdata/cluster-config-default-path-management.yaml",
expectedFluxSystemDirPath: "clusters/management-cluster/flux-system",
expectedFluxPatchesFileName: defaultFluxPatchesFileName,
expectedFluxSyncFileName: defaultFluxSyncFileName,
},
{
testName: "with user provided config path",
clusterName: "management-cluster",
selfManaged: true,
fluxpath: "user/provided/path",
expectedClusterConfigGitPath: "user/provided/path",
expectedEksaSystemDirPath: "user/provided/path/management-cluster/eksa-system",
expectedEksaConfigFileName: defaultEksaClusterConfigFileName,
expectedKustomizationFileName: defaultKustomizationManifestFileName,
expectedConfigFileContents: "./testdata/cluster-config-user-provided-path.yaml",
expectedFluxSystemDirPath: "user/provided/path/flux-system",
expectedFluxPatchesFileName: defaultFluxPatchesFileName,
expectedFluxSyncFileName: defaultFluxSyncFileName,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster(tt.clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, tt.fluxpath)
g.flux.EXPECT().BootstrapGithub(g.ctx, cluster, clusterSpec.FluxConfig)
g.git.EXPECT().GetRepo(g.ctx).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir(tt.expectedClusterConfigGitPath)).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.git.EXPECT().Pull(g.ctx, clusterSpec.FluxConfig.Spec.Branch).Return(nil)
datacenterConfig := datacenterConfig(tt.clusterName)
machineConfig := machineConfig(tt.clusterName)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
expectedEksaClusterConfigPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedEksaConfigFileName)
test.AssertFilesEquals(t, expectedEksaClusterConfigPath, tt.expectedConfigFileContents)
expectedKustomizationPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedKustomizationFileName)
test.AssertFilesEquals(t, expectedKustomizationPath, "./testdata/kustomization.yaml")
expectedFluxPatchesPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxPatchesFileName)
test.AssertFilesEquals(t, expectedFluxPatchesPath, "./testdata/gotk-patches.yaml")
expectedFluxSyncPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxSyncFileName)
test.AssertFilesEquals(t, expectedFluxSyncPath, "./testdata/gotk-sync.yaml")
})
}
}
func TestInstallGitOpsOnManagementClusterWithoutClusterSpec(t *testing.T) {
tests := []struct {
testName string
clusterName string
managedbyClusterName string
fluxpath string
expectedClusterConfigGitPath string
expectedEksaSystemDirPath string
expectedEksaConfigFileName string
expectedKustomizationFileName string
expectedFluxSystemDirPath string
expectedFluxPatchesFileName string
expectedFluxSyncFileName string
}{
{
testName: "with default config path",
clusterName: "management-cluster",
fluxpath: "",
expectedClusterConfigGitPath: "clusters/management-cluster",
expectedEksaSystemDirPath: "clusters/management-cluster/management-cluster/eksa-system",
expectedEksaConfigFileName: defaultEksaClusterConfigFileName,
expectedKustomizationFileName: defaultKustomizationManifestFileName,
expectedFluxSystemDirPath: "clusters/management-cluster/flux-system",
expectedFluxPatchesFileName: defaultFluxPatchesFileName,
expectedFluxSyncFileName: defaultFluxSyncFileName,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster(tt.clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, tt.fluxpath)
g.flux.EXPECT().BootstrapGithub(g.ctx, cluster, clusterSpec.FluxConfig)
g.git.EXPECT().GetRepo(g.ctx).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir(tt.expectedClusterConfigGitPath)).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.git.EXPECT().Pull(g.ctx, clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, nil, nil)).To(Succeed())
expectedEksaClusterConfigPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedEksaConfigFileName)
g.Expect(validations.FileExists(expectedEksaClusterConfigPath)).To(Equal(false))
expectedKustomizationPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedKustomizationFileName)
g.Expect(validations.FileExists(expectedKustomizationPath)).To(Equal(false))
expectedFluxPatchesPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxPatchesFileName)
test.AssertFilesEquals(t, expectedFluxPatchesPath, "./testdata/gotk-patches.yaml")
expectedFluxSyncPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxSyncFileName)
test.AssertFilesEquals(t, expectedFluxSyncPath, "./testdata/gotk-sync.yaml")
})
}
}
func TestInstallGitOpsOnWorkloadClusterWithPrexistingRepo(t *testing.T) {
cluster := &types.Cluster{}
clusterName := "workload-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterConfig.SetManagedBy("management-cluster")
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g.flux.EXPECT().BootstrapGithub(g.ctx, cluster, clusterSpec.FluxConfig)
g.git.EXPECT().GetRepo(g.ctx).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir("clusters/management-cluster")).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.git.EXPECT().Pull(g.ctx, clusterSpec.FluxConfig.Spec.Branch).Return(nil)
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
expectedEksaClusterConfigPath := path.Join(g.writer.Dir(), "clusters/management-cluster/workload-cluster/eksa-system", defaultEksaClusterConfigFileName)
test.AssertFilesEquals(t, expectedEksaClusterConfigPath, "./testdata/cluster-config-default-path-workload.yaml")
expectedKustomizationPath := path.Join(g.writer.Dir(), "clusters/management-cluster/workload-cluster/eksa-system", defaultKustomizationManifestFileName)
test.AssertFilesEquals(t, expectedKustomizationPath, "./testdata/kustomization.yaml")
expectedFluxPatchesPath := path.Join(g.writer.Dir(), "clusters/management-cluster/flux-system", defaultFluxPatchesFileName)
	// os.Stat returns a nil error only when the file exists, so err == nil is
	// the "file exists" case these assertions guard against.
	if _, err := os.Stat(expectedFluxPatchesPath); err == nil {
		t.Errorf("File exists at %s, should not exist", expectedFluxPatchesPath)
	}
	expectedFluxSyncPath := path.Join(g.writer.Dir(), "clusters/management-cluster/flux-system", defaultFluxSyncFileName)
	if _, err := os.Stat(expectedFluxSyncPath); err == nil {
		t.Errorf("File exists at %s, should not exist", expectedFluxSyncPath)
	}
}
func TestInstallGitOpsSetupRepoError(t *testing.T) {
cluster := &types.Cluster{}
clusterName := "test-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g.git.EXPECT().GetRepo(g.ctx).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir("clusters/management-cluster")).Return(errors.New("error in add"))
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, nil, nil)).To(MatchError(ContainSubstring("error in add")))
}
func TestInstallGitOpsBootstrapError(t *testing.T) {
cluster := &types.Cluster{}
clusterName := "test-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g.git.EXPECT().GetRepo(g.ctx).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir("clusters/management-cluster")).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.flux.EXPECT().BootstrapGithub(g.ctx, cluster, clusterSpec.FluxConfig).Return(errors.New("error in bootstrap"))
g.flux.EXPECT().Uninstall(g.ctx, cluster, clusterSpec.FluxConfig).Return(nil)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, nil, nil)).To(MatchError(ContainSubstring("error in bootstrap")))
}
func TestInstallGitOpsGitProviderSuccess(t *testing.T) {
cluster := &types.Cluster{}
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, "")
clusterSpec.FluxConfig.Spec.Git = &v1alpha1.GitProviderConfig{RepositoryUrl: "git.xyz"}
clusterSpec.FluxConfig.Spec.Github = nil
g.flux.EXPECT().BootstrapGit(g.ctx, cluster, clusterSpec.FluxConfig, nil)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir("clusters/management-cluster")).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.git.EXPECT().Pull(g.ctx, clusterSpec.FluxConfig.Spec.Branch).Return(nil)
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
}
func TestInstallGitOpsCommitFilesError(t *testing.T) {
cluster := &types.Cluster{}
clusterName := "test-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g.git.EXPECT().GetRepo(g.ctx).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).Return(errors.New("error in clone"))
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, nil, nil)).To(MatchError(ContainSubstring("error in clone")))
}
func TestInstallGitOpsNoPrexistingRepo(t *testing.T) {
tests := []struct {
testName string
clusterName string
fluxpath string
expectedClusterConfigGitPath string
expectedEksaSystemDirPath string
expectedEksaConfigFileName string
expectedKustomizationFileName string
expectedConfigFileContents string
expectedFluxSystemDirPath string
expectedFluxPatchesFileName string
expectedFluxSyncFileName string
expectedRepoUrl string
}{
{
testName: "with default config path",
clusterName: "management-cluster",
fluxpath: "",
expectedClusterConfigGitPath: "clusters/management-cluster",
expectedEksaSystemDirPath: "clusters/management-cluster/management-cluster/eksa-system",
expectedEksaConfigFileName: defaultEksaClusterConfigFileName,
expectedKustomizationFileName: defaultKustomizationManifestFileName,
expectedConfigFileContents: "./testdata/cluster-config-default-path-management.yaml",
expectedFluxSystemDirPath: "clusters/management-cluster/flux-system",
expectedFluxPatchesFileName: defaultFluxPatchesFileName,
expectedFluxSyncFileName: defaultFluxSyncFileName,
},
{
testName: "with user provided config path",
clusterName: "management-cluster",
fluxpath: "user/provided/path",
expectedClusterConfigGitPath: "user/provided/path",
expectedEksaSystemDirPath: "user/provided/path/management-cluster/eksa-system",
expectedEksaConfigFileName: defaultEksaClusterConfigFileName,
expectedKustomizationFileName: defaultKustomizationManifestFileName,
expectedConfigFileContents: "./testdata/cluster-config-user-provided-path.yaml",
expectedFluxSystemDirPath: "user/provided/path/flux-system",
expectedFluxPatchesFileName: defaultFluxPatchesFileName,
expectedFluxSyncFileName: defaultFluxSyncFileName,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster(tt.clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, tt.fluxpath)
g.flux.EXPECT().BootstrapGithub(g.ctx, cluster, clusterSpec.FluxConfig)
n := clusterSpec.FluxConfig.Spec.Github.Repository
o := clusterSpec.FluxConfig.Spec.Github.Owner
p := clusterSpec.FluxConfig.Spec.Github.Personal
b := clusterSpec.FluxConfig.Spec.Branch
d := "EKS-A cluster configuration repository"
createRepoOpts := git.CreateRepoOpts{Name: n, Owner: o, Description: d, Personal: p, Privacy: true}
g.git.EXPECT().GetRepo(g.ctx).Return(nil, nil)
g.git.EXPECT().CreateRepo(g.ctx, createRepoOpts).Return(nil)
g.git.EXPECT().Init().Return(nil)
g.git.EXPECT().Commit(gomock.Any()).Return(nil)
g.git.EXPECT().Branch(b).Return(nil)
g.git.EXPECT().Add(path.Dir(tt.expectedClusterConfigGitPath)).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.git.EXPECT().Pull(g.ctx, b).Return(nil)
datacenterConfig := datacenterConfig(tt.clusterName)
machineConfig := machineConfig(tt.clusterName)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
expectedEksaClusterConfigPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedEksaConfigFileName)
test.AssertFilesEquals(t, expectedEksaClusterConfigPath, tt.expectedConfigFileContents)
expectedKustomizationPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedKustomizationFileName)
test.AssertFilesEquals(t, expectedKustomizationPath, "./testdata/kustomization.yaml")
expectedFluxPatchesPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxPatchesFileName)
test.AssertFilesEquals(t, expectedFluxPatchesPath, "./testdata/gotk-patches.yaml")
expectedFluxSyncPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxSyncFileName)
test.AssertFilesEquals(t, expectedFluxSyncPath, "./testdata/gotk-sync.yaml")
})
}
}
func TestInstallGitOpsToolkitsBareRepo(t *testing.T) {
tests := []struct {
testName string
clusterName string
fluxpath string
expectedClusterConfigGitPath string
expectedEksaSystemDirPath string
expectedEksaConfigFileName string
expectedKustomizationFileName string
expectedConfigFileContents string
expectedFluxSystemDirPath string
expectedFluxPatchesFileName string
expectedFluxSyncFileName string
}{
{
testName: "with default config path",
clusterName: "management-cluster",
fluxpath: "",
expectedClusterConfigGitPath: "clusters/management-cluster",
expectedEksaSystemDirPath: "clusters/management-cluster/management-cluster/eksa-system",
expectedEksaConfigFileName: defaultEksaClusterConfigFileName,
expectedKustomizationFileName: defaultKustomizationManifestFileName,
expectedConfigFileContents: "./testdata/cluster-config-default-path-management.yaml",
expectedFluxSystemDirPath: "clusters/management-cluster/flux-system",
expectedFluxPatchesFileName: defaultFluxPatchesFileName,
expectedFluxSyncFileName: defaultFluxSyncFileName,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster(tt.clusterName)
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, tt.fluxpath)
g.flux.EXPECT().BootstrapGithub(g.ctx, cluster, clusterSpec.FluxConfig)
g.git.EXPECT().GetRepo(g.ctx).MaxTimes(2).Return(&git.Repository{Name: clusterSpec.FluxConfig.Spec.Github.Repository}, nil)
g.git.EXPECT().Clone(g.ctx).MaxTimes(2).Return(&git.RepositoryIsEmptyError{Repository: "testRepo"})
g.git.EXPECT().Init().Return(nil)
g.git.EXPECT().Commit(gomock.Any()).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(path.Dir(tt.expectedClusterConfigGitPath)).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
g.git.EXPECT().Pull(g.ctx, clusterSpec.FluxConfig.Spec.Branch).Return(nil)
datacenterConfig := datacenterConfig(tt.clusterName)
machineConfig := machineConfig(tt.clusterName)
g.Expect(g.gitOpsFlux.InstallGitOps(g.ctx, cluster, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
expectedEksaClusterConfigPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedEksaConfigFileName)
test.AssertFilesEquals(t, expectedEksaClusterConfigPath, tt.expectedConfigFileContents)
expectedKustomizationPath := path.Join(g.writer.Dir(), tt.expectedEksaSystemDirPath, tt.expectedKustomizationFileName)
test.AssertFilesEquals(t, expectedKustomizationPath, "./testdata/kustomization.yaml")
expectedFluxPatchesPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxPatchesFileName)
test.AssertFilesEquals(t, expectedFluxPatchesPath, "./testdata/gotk-patches.yaml")
expectedFluxSyncPath := path.Join(g.writer.Dir(), tt.expectedFluxSystemDirPath, tt.expectedFluxSyncFileName)
test.AssertFilesEquals(t, expectedFluxSyncPath, "./testdata/gotk-sync.yaml")
})
}
}
func TestResumeClusterResourcesReconcile(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterConfig.Spec.DatacenterRef = v1alpha1.Ref{Name: "datacenter"}
clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "cp-machine"}
clusterConfig.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &v1alpha1.Ref{Name: "worker-machine"},
},
}
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "")
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "providerDatacenter", "datacenter", "")
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "providerMachineConfig", "cp-machine", "")
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "providerMachineConfig", "worker-machine", "")
g.provider.EXPECT().DatacenterResourceType().Return("providerDatacenter")
g.provider.EXPECT().MachineResourceType().Return("providerMachineConfig").Times(3)
g.Expect(g.gitOpsFlux.ResumeClusterResourcesReconcile(g.ctx, cluster, clusterSpec, g.provider)).To(Succeed())
}
func TestResumeClusterResourcesReconcileEnableClusterReconcileError(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "").Return(errors.New("error in enable cluster reconcile"))
g.Expect(g.gitOpsFlux.ResumeClusterResourcesReconcile(g.ctx, cluster, clusterSpec, nil)).To(MatchError(ContainSubstring("error in enable cluster reconcile")))
}
func TestResumeClusterResourcesReconcileEnableDatacenterReconcileError(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterConfig.Spec.DatacenterRef = v1alpha1.Ref{Name: "datacenter"}
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "")
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "providerDatacenter", "datacenter", "").Return(errors.New("error in enable datacenter reconcile"))
g.provider.EXPECT().DatacenterResourceType().Return("providerDatacenter").Times(2)
g.Expect(g.gitOpsFlux.ResumeClusterResourcesReconcile(g.ctx, cluster, clusterSpec, g.provider)).To(MatchError(ContainSubstring("error in enable datacenter reconcile")))
}
func TestResumeClusterResourcesReconcileEnableMachineReconcileError(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterConfig.Spec.DatacenterRef = v1alpha1.Ref{Name: "datacenter"}
clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "cp-machine"}
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "")
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "providerDatacenter", "datacenter", "")
g.flux.EXPECT().EnableResourceReconcile(g.ctx, cluster, "providerMachineConfig", "cp-machine", "").Return(errors.New("error in enable machine reconcile"))
g.provider.EXPECT().DatacenterResourceType().Return("providerDatacenter")
g.provider.EXPECT().MachineResourceType().Return("providerMachineConfig").Times(3)
g.Expect(g.gitOpsFlux.ResumeClusterResourcesReconcile(g.ctx, cluster, clusterSpec, g.provider)).To(MatchError(ContainSubstring("error in enable machine reconcile")))
}
func TestPauseClusterResourcesReconcile(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
clusterConfig.Spec.DatacenterRef = v1alpha1.Ref{Name: "datacenter"}
clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "cp-machine"}
clusterConfig.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &v1alpha1.Ref{Name: "worker-machine"},
},
}
g := newFluxTest(t)
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "")
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "providerDatacenter", "datacenter", "")
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "providerMachineConfig", "cp-machine", "")
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "providerMachineConfig", "worker-machine", "")
g.provider.EXPECT().DatacenterResourceType().Return("providerDatacenter")
g.provider.EXPECT().MachineResourceType().Return("providerMachineConfig").Times(3)
g.Expect(g.gitOpsFlux.PauseClusterResourcesReconcile(g.ctx, cluster, clusterSpec, g.provider)).To(Succeed())
}
func TestPauseClusterResourcesReconcileEnableClusterReconcileError(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "").Return(errors.New("error in enable cluster reconcile"))
g.Expect(g.gitOpsFlux.PauseClusterResourcesReconcile(g.ctx, cluster, clusterSpec, nil)).To(MatchError(ContainSubstring("error in enable cluster reconcile")))
}
func TestPauseClusterResourcesReconcileEnableDatacenterReconcileError(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterConfig.Spec.DatacenterRef = v1alpha1.Ref{Name: "datacenter"}
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "")
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "providerDatacenter", "datacenter", "").Return(errors.New("error in enable datacenter reconcile"))
g.provider.EXPECT().DatacenterResourceType().Return("providerDatacenter").Times(2)
g.Expect(g.gitOpsFlux.PauseClusterResourcesReconcile(g.ctx, cluster, clusterSpec, g.provider)).To(MatchError(ContainSubstring("error in enable datacenter reconcile")))
}
func TestPauseClusterResourcesReconcileEnableMachineReconcileError(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterConfig.Spec.DatacenterRef = v1alpha1.Ref{Name: "datacenter"}
clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "cp-machine"}
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "clusters.anywhere.eks.amazonaws.com", "management-cluster", "")
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "providerDatacenter", "datacenter", "")
g.flux.EXPECT().DisableResourceReconcile(g.ctx, cluster, "providerMachineConfig", "cp-machine", "").Return(errors.New("error in enable machine reconcile"))
g.provider.EXPECT().DatacenterResourceType().Return("providerDatacenter")
g.provider.EXPECT().MachineResourceType().Return("providerMachineConfig").Times(3)
g.Expect(g.gitOpsFlux.PauseClusterResourcesReconcile(g.ctx, cluster, clusterSpec, g.provider)).To(MatchError(ContainSubstring("error in enable machine reconcile")))
}
func TestUpdateGitRepoEksaSpecLocalRepoNotExists(t *testing.T) {
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
eksaSystemDirPath := "clusters/management-cluster/management-cluster/eksa-system"
g := newFluxTest(t)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(eksaSystemDirPath).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).Return(nil)
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
expectedEksaClusterConfigPath := path.Join(g.writer.Dir(), eksaSystemDirPath, defaultEksaClusterConfigFileName)
test.AssertFilesEquals(t, expectedEksaClusterConfigPath, "./testdata/cluster-config-default-path-management.yaml")
}
func TestUpdateGitRepoEksaSpecLocalRepoExists(t *testing.T) {
g := newFluxTest(t)
mockCtrl := gomock.NewController(t)
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
eksaSystemDirPath := "clusters/management-cluster/management-cluster/eksa-system"
clusterSpec := newClusterSpec(t, clusterConfig, "")
mocks := fluxMocks.NewMockFluxClient(mockCtrl)
gitProvider := gitMocks.NewMockProviderClient(mockCtrl)
gitClient := gitMocks.NewMockClient(mockCtrl)
gitClient.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
gitClient.EXPECT().Add(eksaSystemDirPath).Return(nil)
gitClient.EXPECT().Commit(test.OfType("string")).Return(nil)
gitClient.EXPECT().Push(g.ctx).Return(nil)
writePath, w := test.NewWriter(t)
if _, err := w.WithDir(".git"); err != nil {
t.Errorf("failed to add .git dir: %v", err)
}
fGitOptions := &gitFactory.GitTools{
Provider: gitProvider,
Client: gitClient,
Writer: w,
}
f := flux.NewFlux(mocks, nil, fGitOptions, nil)
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(f.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
expectedEksaClusterConfigPath := path.Join(writePath, eksaSystemDirPath, defaultEksaClusterConfigFileName)
test.AssertFilesEquals(t, expectedEksaClusterConfigPath, "./testdata/cluster-config-default-path-management.yaml")
}
func TestUpdateGitRepoEksaSpecErrorCloneRepo(t *testing.T) {
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.git.EXPECT().Clone(g.ctx).MaxTimes(2).Return(errors.New("error in cloneIfExists repo"))
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(MatchError(ContainSubstring("error in cloneIfExists repo")))
}
func TestUpdateGitRepoEksaSpecErrorSwitchBranch(t *testing.T) {
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(errors.New("failed to switch branch"))
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(MatchError(ContainSubstring("failed to switch branch")))
}
func TestUpdateGitRepoEksaSpecErrorAddFile(t *testing.T) {
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add("clusters/management-cluster/management-cluster/eksa-system").Return(errors.New("failed to add file"))
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(MatchError(ContainSubstring("failed to add file")))
}
func TestUpdateGitRepoEksaSpecErrorCommit(t *testing.T) {
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add("clusters/management-cluster/management-cluster/eksa-system").Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(errors.New("failed to commit"))
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(MatchError(ContainSubstring("failed to commit")))
}
func TestUpdateGitRepoEksaSpecErrorPushAfterRetry(t *testing.T) {
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add("clusters/management-cluster/management-cluster/eksa-system").Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(g.ctx).MaxTimes(2).Return(errors.New("failed to push code"))
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(g.gitOpsFlux.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(MatchError(ContainSubstring("failed to push code")))
}
func TestUpdateGitRepoEksaSpecSkip(t *testing.T) {
g := newFluxTest(t)
clusterName := "management-cluster"
clusterConfig := v1alpha1.NewCluster(clusterName)
clusterSpec := newClusterSpec(t, clusterConfig, "")
f := flux.NewFlux(nil, nil, nil, nil)
datacenterConfig := datacenterConfig(clusterName)
machineConfig := machineConfig(clusterName)
g.Expect(f.UpdateGitEksaSpec(g.ctx, clusterSpec, datacenterConfig, []providers.MachineConfig{machineConfig})).To(Succeed())
}
func TestForceReconcileGitRepo(t *testing.T) {
cluster := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("")
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.flux.EXPECT().ForceReconcile(g.ctx, cluster, "flux-system")
g.Expect(g.gitOpsFlux.ForceReconcileGitRepo(g.ctx, cluster, clusterSpec)).To(Succeed())
}
func TestForceReconcileGitRepoSkip(t *testing.T) {
cluster := &types.Cluster{}
g := newFluxTest(t)
f := flux.NewFlux(nil, nil, nil, nil)
g.Expect(f.ForceReconcileGitRepo(g.ctx, cluster, g.clusterSpec)).To(Succeed())
}
func TestCleanupGitRepo(t *testing.T) {
g := newFluxTest(t)
mockCtrl := gomock.NewController(t)
clusterConfig := v1alpha1.NewCluster("management-cluster")
expectedClusterPath := "clusters/management-cluster"
clusterSpec := newClusterSpec(t, clusterConfig, "")
gitProvider := gitMocks.NewMockProviderClient(mockCtrl)
gitClient := gitMocks.NewMockClient(mockCtrl)
gitClient.EXPECT().Clone(g.ctx).Return(nil)
gitClient.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
gitClient.EXPECT().Remove(expectedClusterPath).Return(nil)
gitClient.EXPECT().Commit(test.OfType("string")).Return(nil)
gitClient.EXPECT().Push(g.ctx).Return(nil)
_, w := test.NewWriter(t)
if _, err := w.WithDir(expectedClusterPath); err != nil {
t.Errorf("failed to add %s dir: %v", expectedClusterPath, err)
}
fGitOptions := &gitFactory.GitTools{
Provider: gitProvider,
Client: gitClient,
Writer: w,
}
f := flux.NewFlux(nil, nil, fGitOptions, nil)
g.Expect(f.CleanupGitRepo(g.ctx, clusterSpec)).To(Succeed())
}
func TestCleanupGitRepoWorkloadCluster(t *testing.T) {
g := newFluxTest(t)
mockCtrl := gomock.NewController(t)
clusterConfig := v1alpha1.NewCluster("workload-cluster")
clusterConfig.SetManagedBy("management-cluster")
expectedClusterPath := "clusters/management-cluster/workload-cluster/" + constants.EksaSystemNamespace
clusterSpec := newClusterSpec(t, clusterConfig, "")
gitProvider := gitMocks.NewMockProviderClient(mockCtrl)
gitClient := gitMocks.NewMockClient(mockCtrl)
gitClient.EXPECT().Clone(g.ctx).Return(nil)
gitClient.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
gitClient.EXPECT().Remove(expectedClusterPath).Return(nil)
gitClient.EXPECT().Commit(test.OfType("string")).Return(nil)
gitClient.EXPECT().Push(g.ctx).Return(nil)
_, w := test.NewWriter(t)
if _, err := w.WithDir(expectedClusterPath); err != nil {
t.Errorf("failed to add %s dir: %v", expectedClusterPath, err)
}
fGitOptions := &gitFactory.GitTools{
Provider: gitProvider,
Client: gitClient,
Writer: w,
}
f := flux.NewFlux(nil, nil, fGitOptions, nil)
g.Expect(f.CleanupGitRepo(g.ctx, clusterSpec)).To(Succeed())
}
func TestCleanupGitRepoSkip(t *testing.T) {
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
g := newFluxTest(t)
g.git.EXPECT().Clone(g.ctx).Return(nil)
g.git.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
g.Expect(g.gitOpsFlux.CleanupGitRepo(g.ctx, clusterSpec)).To(Succeed())
}
func TestCleanupGitRepoRemoveError(t *testing.T) {
g := newFluxTest(t)
mockCtrl := gomock.NewController(t)
clusterConfig := v1alpha1.NewCluster("management-cluster")
expectedClusterPath := "clusters/management-cluster"
clusterSpec := newClusterSpec(t, clusterConfig, "")
gitProvider := gitMocks.NewMockProviderClient(mockCtrl)
gitClient := gitMocks.NewMockClient(mockCtrl)
gitClient.EXPECT().Clone(g.ctx).Return(nil)
gitClient.EXPECT().Branch(clusterSpec.FluxConfig.Spec.Branch).Return(nil)
gitClient.EXPECT().Remove(expectedClusterPath).Return(errors.New("error in remove"))
_, w := test.NewWriter(t)
if _, err := w.WithDir(expectedClusterPath); err != nil {
t.Errorf("failed to add %s dir: %v", expectedClusterPath, err)
}
fGitOptions := &gitFactory.GitTools{
Provider: gitProvider,
Client: gitClient,
Writer: w,
}
f := flux.NewFlux(nil, nil, fGitOptions, nil)
g.Expect(f.CleanupGitRepo(g.ctx, clusterSpec)).To(MatchError(ContainSubstring("error in remove")))
}
func TestValidationsSkipFLux(t *testing.T) {
g := newFluxTest(t)
g.gitOpsFlux = flux.NewFlux(g.flux, nil, nil, nil)
g.Expect(g.gitOpsFlux.Validations(g.ctx, g.clusterSpec)).To(BeEmpty())
}
func TestValidationsErrorFromPathExists(t *testing.T) {
g := newFluxTest(t)
owner, repo, path := g.setupFlux()
g.git.EXPECT().PathExists(g.ctx, owner, repo, "main", path).Return(false, errors.New("error from git"))
g.Expect(runValidations(g.gitOpsFlux.Validations(g.ctx, g.clusterSpec))).NotTo(Succeed())
}
func TestValidationsPath(t *testing.T) {
g := newFluxTest(t)
owner, repo, path := g.setupFlux()
g.git.EXPECT().PathExists(g.ctx, owner, repo, "main", path).Return(true, nil)
g.Expect(runValidations(g.gitOpsFlux.Validations(g.ctx, g.clusterSpec))).NotTo(Succeed())
}
func TestValidationsSuccess(t *testing.T) {
g := newFluxTest(t)
owner, repo, path := g.setupFlux()
g.git.EXPECT().PathExists(g.ctx, owner, repo, "main", path).Return(false, nil)
g.Expect(runValidations(g.gitOpsFlux.Validations(g.ctx, g.clusterSpec))).To(Succeed())
}
func TestBootstrapGithubSkip(t *testing.T) {
g := newFluxTest(t)
c := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
clusterSpec.FluxConfig.Spec.Github = nil
g.Expect(g.gitOpsFlux.Bootstrap(g.ctx, c, clusterSpec)).To(Succeed())
}
func TestBootstrapGithubError(t *testing.T) {
g := newFluxTest(t)
c := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
g.flux.EXPECT().BootstrapGithub(g.ctx, c, clusterSpec.FluxConfig).Return(errors.New("error in bootstrap github"))
g.flux.EXPECT().Uninstall(g.ctx, c, clusterSpec.FluxConfig).Return(nil)
g.Expect(g.gitOpsFlux.Bootstrap(g.ctx, c, clusterSpec)).To(MatchError(ContainSubstring("error in bootstrap github")))
}
func TestBootstrapGitError(t *testing.T) {
g := newFluxTest(t)
c := &types.Cluster{}
clusterConfig := v1alpha1.NewCluster("management-cluster")
clusterSpec := newClusterSpec(t, clusterConfig, "")
clusterSpec.FluxConfig.Spec.Git = &v1alpha1.GitProviderConfig{RepositoryUrl: "abc"}
g.flux.EXPECT().BootstrapGithub(g.ctx, c, clusterSpec.FluxConfig).Return(nil)
g.flux.EXPECT().BootstrapGit(g.ctx, c, clusterSpec.FluxConfig, nil).Return(errors.New("error in bootstrap git"))
g.flux.EXPECT().Uninstall(g.ctx, c, clusterSpec.FluxConfig).Return(nil)
g.Expect(g.gitOpsFlux.Bootstrap(g.ctx, c, clusterSpec)).To(MatchError(ContainSubstring("error in bootstrap git")))
}
func TestUninstallError(t *testing.T) {
g := newFluxTest(t)
c := &types.Cluster{}
g.flux.EXPECT().Uninstall(g.ctx, c, g.clusterSpec.FluxConfig).Return(errors.New("error in uninstall"))
g.Expect(g.gitOpsFlux.Uninstall(g.ctx, c, g.clusterSpec)).To(MatchError(ContainSubstring("error in uninstall")))
}
package flux
import (
"context"
"github.com/aws/eks-anywhere/pkg/git"
gitFactory "github.com/aws/eks-anywhere/pkg/git/factory"
"github.com/aws/eks-anywhere/pkg/retrier"
)
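// gitClient wraps the low-level git client and the git hosting provider
// client with a shared retrier so that remote operations are retried
// transparently.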
type gitClient struct {
git git.Client
gitProvider git.ProviderClient
*retrier.Retrier
}
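// newGitClient builds a gitClient from the factory tools, or returns nil when
// no git tooling is configured.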
func newGitClient(gitTools *gitFactory.GitTools) *gitClient {
if gitTools == nil {
return nil
}
return &gitClient{
git: gitTools.Client,
gitProvider: gitTools.Provider,
Retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
}
}
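// GetRepo describes the remote repository, retrying on transient failures; it
// is a no-op when no provider client is configured.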
func (c *gitClient) GetRepo(ctx context.Context) (repo *git.Repository, err error) {
if c.gitProvider == nil {
return nil, nil
}
err = c.Retry(
func() error {
repo, err = c.gitProvider.GetRepo(ctx)
return err
},
)
return repo, err
}
func (c *gitClient) CreateRepo(ctx context.Context, opts git.CreateRepoOpts) error {
if c.gitProvider == nil {
return nil
}
return c.Retry(
func() error {
_, err := c.gitProvider.CreateRepo(ctx, opts)
return err
},
)
}
func (c *gitClient) Clone(ctx context.Context) error {
return c.Retry(
func() error {
return c.git.Clone(ctx)
},
)
}
func (c *gitClient) Push(ctx context.Context) error {
return c.Retry(
func() error {
return c.git.Push(ctx)
},
)
}
func (c *gitClient) Pull(ctx context.Context, branch string) error {
return c.Retry(
func() error {
return c.git.Pull(ctx, branch)
},
)
}
func (c *gitClient) PathExists(ctx context.Context, owner, repo, branch, path string) (exists bool, err error) {
if c.gitProvider == nil {
return false, nil
}
err = c.Retry(
func() error {
exists, err = c.gitProvider.PathExists(ctx, owner, repo, branch, path)
return err
},
)
return exists, err
}
func (c *gitClient) Add(filename string) error {
return c.git.Add(filename)
}
func (c *gitClient) Remove(filename string) error {
return c.git.Remove(filename)
}
func (c *gitClient) Commit(message string) error {
return c.git.Commit(message)
}
func (c *gitClient) Branch(name string) error {
return c.git.Branch(name)
}
func (c *gitClient) Init() error {
return c.git.Init()
}
package flux
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/git"
gitFactory "github.com/aws/eks-anywhere/pkg/git/factory"
"github.com/aws/eks-anywhere/pkg/git/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
)
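// gitClientTest groups the gitClient under test with the mock git and
// provider clients it delegates to.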
type gitClientTest struct {
*WithT
ctx context.Context
c *gitClient
g *mocks.MockClient
p *mocks.MockProviderClient
}
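// newGitClientTest assembles the mocks and swaps in a zero back-off retrier so
// the retry paths run without delay.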
func newGitClientTest(t *testing.T) *gitClientTest {
ctrl := gomock.NewController(t)
g := mocks.NewMockClient(ctrl)
p := mocks.NewMockProviderClient(ctrl)
tool := &gitFactory.GitTools{
Provider: p,
Client: g,
}
c := newGitClient(tool)
c.Retrier = retrier.NewWithMaxRetries(maxRetries, 0)
return &gitClientTest{
WithT: NewWithT(t),
ctx: context.Background(),
c: c,
g: g,
p: p,
}
}
func TestGitClientGetRepoSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.p.EXPECT().GetRepo(tt.ctx).Return(nil, errors.New("error in get repo")).Times(4)
tt.p.EXPECT().GetRepo(tt.ctx).Return(nil, nil).Times(1)
_, err := tt.c.GetRepo(tt.ctx)
tt.Expect(err).To(Succeed(), "gitClient.GetRepo() should succeed with 5 tries")
}
func TestGitClientGetRepoSkip(t *testing.T) {
tt := newGitClientTest(t)
c := newGitClient(&gitFactory.GitTools{Provider: nil, Client: tt.g})
_, err := c.GetRepo(tt.ctx)
tt.Expect(err).To(Succeed())
}
func TestGitClientGetRepoError(t *testing.T) {
tt := newGitClientTest(t)
tt.p.EXPECT().GetRepo(tt.ctx).Return(nil, errors.New("error in get repo")).Times(5)
tt.p.EXPECT().GetRepo(tt.ctx).Return(nil, nil).AnyTimes()
_, err := tt.c.GetRepo(tt.ctx)
tt.Expect(err).To(MatchError(ContainSubstring("error in get repo")), "gitClient.GetRepo() should fail after 5 tries")
}
func TestGitClientCreateRepoSuccess(t *testing.T) {
tt := newGitClientTest(t)
opts := git.CreateRepoOpts{}
tt.p.EXPECT().CreateRepo(tt.ctx, opts).Return(nil, errors.New("error in create repo")).Times(4)
tt.p.EXPECT().CreateRepo(tt.ctx, opts).Return(nil, nil).Times(1)
tt.Expect(tt.c.CreateRepo(tt.ctx, opts)).To(Succeed(), "gitClient.CreateRepo() should succeed with 5 tries")
}
func TestGitClientCreateRepoSkip(t *testing.T) {
tt := newGitClientTest(t)
opts := git.CreateRepoOpts{}
c := newGitClient(&gitFactory.GitTools{Provider: nil, Client: tt.g})
tt.Expect(c.CreateRepo(tt.ctx, opts)).To(Succeed())
}
func TestGitClientCreateRepoError(t *testing.T) {
tt := newGitClientTest(t)
opts := git.CreateRepoOpts{}
tt.p.EXPECT().CreateRepo(tt.ctx, opts).Return(nil, errors.New("error in create repo")).Times(5)
tt.p.EXPECT().CreateRepo(tt.ctx, opts).Return(nil, nil).AnyTimes()
tt.Expect(tt.c.CreateRepo(tt.ctx, opts)).To(MatchError(ContainSubstring("error in create repo")), "gitClient.CreateRepo() should fail after 5 tries")
}
func TestGitClientCloneSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Clone(tt.ctx).Return(errors.New("error in clone repo")).Times(4)
tt.g.EXPECT().Clone(tt.ctx).Return(nil).Times(1)
tt.Expect(tt.c.Clone(tt.ctx)).To(Succeed(), "gitClient.Clone() should succeed with 5 tries")
}
func TestGitClientCloneError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Clone(tt.ctx).Return(errors.New("error in clone repo")).Times(5)
tt.g.EXPECT().Clone(tt.ctx).Return(nil).AnyTimes()
tt.Expect(tt.c.Clone(tt.ctx)).To(MatchError(ContainSubstring("error in clone repo")), "gitClient.Clone() should fail after 5 tries")
}
func TestGitClientPushSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Push(tt.ctx).Return(errors.New("error in push repo")).Times(4)
tt.g.EXPECT().Push(tt.ctx).Return(nil).Times(1)
tt.Expect(tt.c.Push(tt.ctx)).To(Succeed(), "gitClient.Push() should succeed with 5 tries")
}
func TestGitClientPushError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Push(tt.ctx).Return(errors.New("error in push repo")).Times(5)
tt.g.EXPECT().Push(tt.ctx).Return(nil).AnyTimes()
tt.Expect(tt.c.Push(tt.ctx)).To(MatchError(ContainSubstring("error in push repo")), "gitClient.Push() should fail after 5 tries")
}
func TestGitClientPullSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Pull(tt.ctx, "").Return(errors.New("error in pull repo")).Times(4)
tt.g.EXPECT().Pull(tt.ctx, "").Return(nil).Times(1)
tt.Expect(tt.c.Pull(tt.ctx, "")).To(Succeed(), "gitClient.Pull() should succeed with 5 tries")
}
func TestGitClientPullError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Pull(tt.ctx, "").Return(errors.New("error in pull repo")).Times(5)
tt.g.EXPECT().Pull(tt.ctx, "").Return(nil).AnyTimes()
tt.Expect(tt.c.Pull(tt.ctx, "")).To(MatchError(ContainSubstring("error in pull repo")), "gitClient.Pull() should fail after 5 tries")
}
func TestGitClientPathExistsSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.p.EXPECT().PathExists(tt.ctx, "", "", "", "").Return(false, errors.New("error in get repo")).Times(4)
tt.p.EXPECT().PathExists(tt.ctx, "", "", "", "").Return(true, nil).Times(1)
exists, err := tt.c.PathExists(tt.ctx, "", "", "", "")
tt.Expect(exists).To(Equal(true))
tt.Expect(err).To(Succeed(), "gitClient.PathExists() should succeed with 5 tries")
}
func TestGitClientPathExistsSkip(t *testing.T) {
tt := newGitClientTest(t)
c := newGitClient(&gitFactory.GitTools{Provider: nil, Client: tt.g})
exists, err := c.PathExists(tt.ctx, "", "", "", "")
tt.Expect(exists).To(Equal(false))
tt.Expect(err).To(Succeed())
}
func TestGitClientPathExistsError(t *testing.T) {
tt := newGitClientTest(t)
tt.p.EXPECT().PathExists(tt.ctx, "", "", "", "").Return(false, errors.New("error in get repo")).Times(5)
tt.p.EXPECT().PathExists(tt.ctx, "", "", "", "").Return(true, nil).AnyTimes()
exists, err := tt.c.PathExists(tt.ctx, "", "", "", "")
tt.Expect(exists).To(Equal(false))
tt.Expect(err).To(MatchError(ContainSubstring("error in get repo")), "gitClient.PathExists() should fail after 5 tries")
}
func TestGitClientAddSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Add("").Return(nil)
tt.Expect(tt.c.Add("")).To(Succeed(), "gitClient.Add() should succeed with 1 try")
}
func TestGitClientAddError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Add("").Return(errors.New("error in add"))
tt.Expect(tt.c.Add("")).To(MatchError(ContainSubstring("error in add")), "gitClient.Add() should fail after 1 try")
}
func TestGitClientRemoveSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Remove("").Return(nil)
tt.Expect(tt.c.Remove("")).To(Succeed(), "gitClient.Remove() should succeed with 1 try")
}
func TestGitClientRemoveError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Remove("").Return(errors.New("error in remove"))
tt.Expect(tt.c.Remove("")).To(MatchError(ContainSubstring("error in remove")), "gitClient.Remove() should fail after 1 try")
}
func TestGitClientCommitSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Commit("").Return(nil)
tt.Expect(tt.c.Commit("")).To(Succeed(), "gitClient.Commit() should succeed with 1 try")
}
func TestGitClientCommitError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Commit("").Return(errors.New("error in commit"))
tt.Expect(tt.c.Commit("")).To(MatchError(ContainSubstring("error in commit")), "gitClient.Commit() should fail after 1 try")
}
func TestGitClientBranchSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Branch("").Return(nil)
tt.Expect(tt.c.Branch("")).To(Succeed(), "gitClient.Branch() should succeed with 1 try")
}
func TestGitClientBranchError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Branch("").Return(errors.New("error in branch"))
tt.Expect(tt.c.Branch("")).To(MatchError(ContainSubstring("error in branch")), "gitClient.Branch() should fail after 1 try")
}
func TestGitClientInitSuccess(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Init().Return(nil)
tt.Expect(tt.c.Init()).To(Succeed(), "gitClient.Init() should succeed with 1 try")
}
func TestGitClientInitError(t *testing.T) {
tt := newGitClientTest(t)
tt.g.EXPECT().Init().Return(errors.New("error in init"))
tt.Expect(tt.c.Init()).To(MatchError(ContainSubstring("error in init")), "gitClient.Init() should fail after 1 try")
}
package flux
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
)
const upgradeFluxconfigCommitMessage = "Upgrade commit of flux configuration; generated by EKS-A CLI"
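// Upgrade brings the Flux components up to the version in newSpec when it
// differs from currentSpec: it commits the regenerated flux-system patches to
// git, deletes the old flux-system secret, re-bootstraps the configured
// provider, and reconciles the FluxConfig. It returns nil when there is
// nothing to upgrade.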
func (f *Flux) Upgrade(ctx context.Context, managementCluster *types.Cluster, currentSpec *cluster.Spec, newSpec *cluster.Spec) (*types.ChangeDiff, error) {
logger.V(1).Info("Checking for Flux upgrades")
changeDiff := FluxChangeDiff(currentSpec, newSpec)
if changeDiff == nil {
logger.V(1).Info("Nothing to upgrade for Flux")
return nil, nil
}
logger.V(1).Info("Starting Flux upgrades")
if err := f.upgradeFilesAndCommit(ctx, newSpec); err != nil {
return nil, fmt.Errorf("upgrading Flux from bundles %d to bundles %d: %v", currentSpec.Bundles.Spec.Number, newSpec.Bundles.Spec.Number, err)
}
if err := f.fluxClient.DeleteSystemSecret(ctx, managementCluster, newSpec.FluxConfig.Spec.SystemNamespace); err != nil {
return nil, fmt.Errorf("upgrading Flux when deleting old flux-system secret: %v", err)
}
if err := f.BootstrapGithub(ctx, managementCluster, newSpec); err != nil {
return nil, fmt.Errorf("upgrading Flux components with github provider: %v", err)
}
if err := f.BootstrapGit(ctx, managementCluster, newSpec); err != nil {
return nil, fmt.Errorf("upgrading Flux components with git provider: %v", err)
}
if err := f.fluxClient.Reconcile(ctx, managementCluster, newSpec.FluxConfig); err != nil {
return nil, fmt.Errorf("reconciling Flux components: %v", err)
}
return changeDiff, nil
}
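// FluxChangeDiff reports the Flux version change between the two specs, or nil
// when the cluster is not self-managed, GitOps is not enabled, or the versions
// match.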
func FluxChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ChangeDiff {
if !newSpec.Cluster.IsSelfManaged() {
logger.V(1).Info("Skipping Flux upgrades, not a self-managed cluster")
return nil
}
if currentSpec.Cluster.Spec.GitOpsRef == nil && newSpec.Cluster.Spec.GitOpsRef != nil {
logger.V(1).Info("Skipping Flux upgrades, no previous flux installed in the cluster")
return nil
}
if currentSpec.Cluster.Spec.GitOpsRef == nil {
logger.V(1).Info("Skipping Flux upgrades, GitOps not enabled")
return nil
}
oldVersion := currentSpec.VersionsBundle.Flux.Version
newVersion := newSpec.VersionsBundle.Flux.Version
if oldVersion != newVersion {
logger.V(1).Info("Flux change diff ", "oldVersion ", oldVersion, "newVersion ", newVersion)
return &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "Flux",
NewVersion: newVersion,
OldVersion: oldVersion,
},
},
}
}
return nil
}
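// Install bootstraps GitOps only when the new spec introduces a GitOpsRef that
// the old spec did not have.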
func (f *Flux) Install(ctx context.Context, cluster *types.Cluster, oldSpec, newSpec *cluster.Spec) error {
if oldSpec.Cluster.Spec.GitOpsRef == nil && newSpec.Cluster.Spec.GitOpsRef != nil {
return f.InstallGitOps(ctx, cluster, newSpec, nil, nil)
}
return nil
}
func (f *Flux) upgradeFilesAndCommit(ctx context.Context, newSpec *cluster.Spec) error {
fc := &fluxForCluster{
Flux: f,
clusterSpec: newSpec,
}
if err := fc.syncGitRepo(ctx); err != nil {
return err
}
if err := fc.commitFluxUpgradeFilesToGit(ctx); err != nil {
return err
}
return nil
}
func (fc *fluxForCluster) commitFluxUpgradeFilesToGit(ctx context.Context) error {
logger.Info("Adding flux configuration files to Git")
g := NewFileGenerator()
if err := g.Init(fc.writer, fc.eksaSystemDir(), fc.fluxSystemDir()); err != nil {
return err
}
if err := g.WriteFluxPatch(fc.clusterSpec); err != nil {
return err
}
if err := fc.gitClient.Add(fc.path()); err != nil {
return fmt.Errorf("adding %s to git: %v", fc.path(), err)
}
if err := fc.Flux.pushToRemoteRepo(ctx, fc.path(), upgradeFluxconfigCommitMessage); err != nil {
return err
}
logger.V(3).Info("Finished pushing flux custom manifest files to git",
"repository", fc.repository())
return nil
}
| 120 |
eks-anywhere | aws | Go | package flux_test
import (
"context"
"errors"
"fmt"
"os"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/gitops/flux"
"github.com/aws/eks-anywhere/pkg/types"
)
type upgraderTest struct {
*WithT
ctx context.Context
currentSpec *cluster.Spec
newSpec *cluster.Spec
cluster *types.Cluster
fluxConfig v1alpha1.FluxConfig
}
func newUpgraderTest(t *testing.T) *upgraderTest {
currentSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Bundles.Spec.Number = 1
s.VersionsBundle.Flux.Version = "v0.1.0"
s.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "management-cluster",
},
Spec: v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "testGitOpsRef",
},
},
}
})
return &upgraderTest{
WithT: NewWithT(t),
ctx: context.Background(),
currentSpec: currentSpec,
newSpec: currentSpec.DeepCopy(),
cluster: &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "k.kubeconfig",
},
fluxConfig: v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
ClusterConfigPath: "clusters/management-cluster",
Branch: "testBranch",
Github: &v1alpha1.GithubProviderConfig{
Owner: "mFowler",
Repository: "testRepo",
Personal: true,
},
Git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "",
},
},
},
}
}
func TestFluxUpgradeNoSelfManaged(t *testing.T) {
tt := newUpgraderTest(t)
g := newFluxTest(t)
tt.newSpec.Cluster.SetManagedBy("management-cluster")
tt.Expect(g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestFluxUpgradeNoChanges(t *testing.T) {
tt := newUpgraderTest(t)
g := newFluxTest(t)
tt.newSpec.VersionsBundle.Flux.Version = "v0.1.0"
tt.Expect(g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestFluxUpgradeSuccess(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.Flux.Version = "v0.2.0"
tt.newSpec.FluxConfig = &tt.fluxConfig
g := newFluxTest(t)
if err := setupTestFiles(t, g.writer); err != nil {
t.Errorf("setting up files: %v", err)
}
wantDiff := &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "Flux",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
},
}
g.git.EXPECT().Clone(tt.ctx).Return(nil)
g.git.EXPECT().Branch(tt.fluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(tt.fluxConfig.Spec.ClusterConfigPath).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(tt.ctx).Return(nil)
g.flux.EXPECT().DeleteSystemSecret(tt.ctx, tt.cluster, tt.newSpec.FluxConfig.Spec.SystemNamespace)
g.flux.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.newSpec.FluxConfig)
g.flux.EXPECT().BootstrapGit(tt.ctx, tt.cluster, tt.newSpec.FluxConfig, nil)
g.flux.EXPECT().Reconcile(tt.ctx, tt.cluster, tt.newSpec.FluxConfig)
tt.Expect(g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestFluxUpgradeBootstrapGithubError(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.Flux.Version = "v0.2.0"
tt.newSpec.FluxConfig = &tt.fluxConfig
g := newFluxTest(t)
if err := setupTestFiles(t, g.writer); err != nil {
t.Errorf("setting up files: %v", err)
}
g.git.EXPECT().Clone(tt.ctx).Return(nil)
g.git.EXPECT().Branch(tt.fluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(tt.fluxConfig.Spec.ClusterConfigPath).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(tt.ctx).Return(nil)
g.flux.EXPECT().DeleteSystemSecret(tt.ctx, tt.cluster, tt.newSpec.FluxConfig.Spec.SystemNamespace)
g.flux.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.newSpec.FluxConfig).Return(errors.New("error from client"))
_, err := g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)
tt.Expect(err).NotTo(BeNil())
}
func TestFluxUpgradeBootstrapGitError(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.Flux.Version = "v0.2.0"
tt.newSpec.FluxConfig = &tt.fluxConfig
g := newFluxTest(t)
if err := setupTestFiles(t, g.writer); err != nil {
t.Errorf("setting up files: %v", err)
}
g.git.EXPECT().Clone(tt.ctx).Return(nil)
g.git.EXPECT().Branch(tt.fluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(tt.fluxConfig.Spec.ClusterConfigPath).Return(nil)
g.git.EXPECT().Commit(test.OfType("string")).Return(nil)
g.git.EXPECT().Push(tt.ctx).Return(nil)
g.flux.EXPECT().DeleteSystemSecret(tt.ctx, tt.cluster, tt.newSpec.FluxConfig.Spec.SystemNamespace)
g.flux.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.newSpec.FluxConfig)
g.flux.EXPECT().BootstrapGit(tt.ctx, tt.cluster, tt.newSpec.FluxConfig, nil).Return(errors.New("error in bootstrap git"))
_, err := g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)
tt.Expect(err).To(MatchError(ContainSubstring("error in bootstrap git")))
}
func TestFluxUpgradeAddError(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.Flux.Version = "v0.2.0"
tt.newSpec.FluxConfig = &tt.fluxConfig
g := newFluxTest(t)
if err := setupTestFiles(t, g.writer); err != nil {
t.Errorf("setting up files: %v", err)
}
g.git.EXPECT().Clone(tt.ctx).Return(nil)
g.git.EXPECT().Branch(tt.fluxConfig.Spec.Branch).Return(nil)
g.git.EXPECT().Add(tt.fluxConfig.Spec.ClusterConfigPath).Return(errors.New("error in add"))
_, err := g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)
tt.Expect(err).To(MatchError(ContainSubstring("error in add")))
}
func TestFluxUpgradeNoGitOpsConfig(t *testing.T) {
tt := newUpgraderTest(t)
g := newFluxTest(t)
tt.newSpec.FluxConfig = nil
tt.Expect(g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestFluxUpgradeNewGitOpsConfig(t *testing.T) {
tt := newUpgraderTest(t)
g := newFluxTest(t)
tt.currentSpec.Cluster.Spec.GitOpsRef = nil
tt.Expect(g.gitOpsFlux.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func setupTestFiles(t *testing.T, writer filewriter.FileWriter) error {
w, err := writer.WithDir("clusters/management-cluster/management-cluster/eksa-system")
if err != nil {
return fmt.Errorf("failed to create test eksa-system directory: %v", err)
}
eksaContent, err := os.ReadFile("./testdata/cluster-config-default-path-management.yaml")
if err != nil {
return fmt.Errorf("File [%s] reading error in test: %v", "cluster-config-default-path-management.yaml", err)
}
_, err = w.Write(defaultEksaClusterConfigFileName, eksaContent, filewriter.PersistentFile)
if err != nil {
return fmt.Errorf("failed to write eksa-cluster.yaml in test: %v", err)
}
return nil
}
func TestInstallSuccess(t *testing.T) {
tt := newUpgraderTest(t)
c := flux.NewFlux(nil, nil, nil, nil)
tt.currentSpec.Cluster.Spec.GitOpsRef = nil
tt.Expect(c.Install(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestInstallSkip(t *testing.T) {
tests := []struct {
name string
new, old *v1alpha1.Ref
}{
{
name: "gitops ref removed",
new: nil,
old: &v1alpha1.Ref{Name: "name"},
},
{
name: "gitops ref not exists",
new: nil,
old: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
test := newUpgraderTest(t)
g := newFluxTest(t)
test.currentSpec.Cluster.Spec.GitOpsRef = tt.old
test.newSpec.Cluster.Spec.GitOpsRef = tt.new
test.Expect(g.gitOpsFlux.Install(test.ctx, test.cluster, test.currentSpec, test.newSpec)).To(BeNil())
})
}
}
| 258 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/gitops/flux (interfaces: FluxClient,KubeClient,GitOpsFluxClient,GitClient,Templater)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
config "github.com/aws/eks-anywhere/pkg/config"
executables "github.com/aws/eks-anywhere/pkg/executables"
filewriter "github.com/aws/eks-anywhere/pkg/filewriter"
git "github.com/aws/eks-anywhere/pkg/git"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
)
// MockFluxClient is a mock of FluxClient interface.
type MockFluxClient struct {
ctrl *gomock.Controller
recorder *MockFluxClientMockRecorder
}
// MockFluxClientMockRecorder is the mock recorder for MockFluxClient.
type MockFluxClientMockRecorder struct {
mock *MockFluxClient
}
// NewMockFluxClient creates a new mock instance.
func NewMockFluxClient(ctrl *gomock.Controller) *MockFluxClient {
mock := &MockFluxClient{ctrl: ctrl}
mock.recorder = &MockFluxClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFluxClient) EXPECT() *MockFluxClientMockRecorder {
return m.recorder
}
// BootstrapGit mocks base method.
func (m *MockFluxClient) BootstrapGit(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig, arg3 *config.CliConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BootstrapGit", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// BootstrapGit indicates an expected call of BootstrapGit.
func (mr *MockFluxClientMockRecorder) BootstrapGit(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapGit", reflect.TypeOf((*MockFluxClient)(nil).BootstrapGit), arg0, arg1, arg2, arg3)
}
// BootstrapGithub mocks base method.
func (m *MockFluxClient) BootstrapGithub(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BootstrapGithub", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// BootstrapGithub indicates an expected call of BootstrapGithub.
func (mr *MockFluxClientMockRecorder) BootstrapGithub(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapGithub", reflect.TypeOf((*MockFluxClient)(nil).BootstrapGithub), arg0, arg1, arg2)
}
// Reconcile mocks base method.
func (m *MockFluxClient) Reconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockFluxClientMockRecorder) Reconcile(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockFluxClient)(nil).Reconcile), arg0, arg1, arg2)
}
// Uninstall mocks base method.
func (m *MockFluxClient) Uninstall(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Uninstall", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Uninstall indicates an expected call of Uninstall.
func (mr *MockFluxClientMockRecorder) Uninstall(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uninstall", reflect.TypeOf((*MockFluxClient)(nil).Uninstall), arg0, arg1, arg2)
}
// MockKubeClient is a mock of KubeClient interface.
type MockKubeClient struct {
ctrl *gomock.Controller
recorder *MockKubeClientMockRecorder
}
// MockKubeClientMockRecorder is the mock recorder for MockKubeClient.
type MockKubeClientMockRecorder struct {
mock *MockKubeClient
}
// NewMockKubeClient creates a new mock instance.
func NewMockKubeClient(ctrl *gomock.Controller) *MockKubeClient {
mock := &MockKubeClient{ctrl: ctrl}
mock.recorder = &MockKubeClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubeClient) EXPECT() *MockKubeClientMockRecorder {
return m.recorder
}
// DeleteSecret mocks base method.
func (m *MockKubeClient) DeleteSecret(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteSecret", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteSecret indicates an expected call of DeleteSecret.
func (mr *MockKubeClientMockRecorder) DeleteSecret(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecret", reflect.TypeOf((*MockKubeClient)(nil).DeleteSecret), arg0, arg1, arg2, arg3)
}
// GetEksaCluster mocks base method.
func (m *MockKubeClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1alpha1.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaCluster indicates an expected call of GetEksaCluster.
func (mr *MockKubeClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockKubeClient)(nil).GetEksaCluster), arg0, arg1, arg2)
}
// RemoveAnnotation mocks base method.
func (m *MockKubeClient) RemoveAnnotation(arg0 context.Context, arg1, arg2, arg3 string, arg4 ...executables.KubectlOpt) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2, arg3}
for _, a := range arg4 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "RemoveAnnotation", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// RemoveAnnotation indicates an expected call of RemoveAnnotation.
func (mr *MockKubeClientMockRecorder) RemoveAnnotation(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAnnotation", reflect.TypeOf((*MockKubeClient)(nil).RemoveAnnotation), varargs...)
}
// UpdateAnnotation mocks base method.
func (m *MockKubeClient) UpdateAnnotation(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 ...executables.KubectlOpt) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2, arg3}
for _, a := range arg4 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateAnnotation", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateAnnotation indicates an expected call of UpdateAnnotation.
func (mr *MockKubeClientMockRecorder) UpdateAnnotation(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotation", reflect.TypeOf((*MockKubeClient)(nil).UpdateAnnotation), varargs...)
}
// MockGitOpsFluxClient is a mock of GitOpsFluxClient interface.
type MockGitOpsFluxClient struct {
ctrl *gomock.Controller
recorder *MockGitOpsFluxClientMockRecorder
}
// MockGitOpsFluxClientMockRecorder is the mock recorder for MockGitOpsFluxClient.
type MockGitOpsFluxClientMockRecorder struct {
mock *MockGitOpsFluxClient
}
// NewMockGitOpsFluxClient creates a new mock instance.
func NewMockGitOpsFluxClient(ctrl *gomock.Controller) *MockGitOpsFluxClient {
mock := &MockGitOpsFluxClient{ctrl: ctrl}
mock.recorder = &MockGitOpsFluxClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGitOpsFluxClient) EXPECT() *MockGitOpsFluxClientMockRecorder {
return m.recorder
}
// BootstrapGit mocks base method.
func (m *MockGitOpsFluxClient) BootstrapGit(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig, arg3 *config.CliConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BootstrapGit", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// BootstrapGit indicates an expected call of BootstrapGit.
func (mr *MockGitOpsFluxClientMockRecorder) BootstrapGit(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapGit", reflect.TypeOf((*MockGitOpsFluxClient)(nil).BootstrapGit), arg0, arg1, arg2, arg3)
}
// BootstrapGithub mocks base method.
func (m *MockGitOpsFluxClient) BootstrapGithub(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BootstrapGithub", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// BootstrapGithub indicates an expected call of BootstrapGithub.
func (mr *MockGitOpsFluxClientMockRecorder) BootstrapGithub(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapGithub", reflect.TypeOf((*MockGitOpsFluxClient)(nil).BootstrapGithub), arg0, arg1, arg2)
}
// DeleteSystemSecret mocks base method.
func (m *MockGitOpsFluxClient) DeleteSystemSecret(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteSystemSecret", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteSystemSecret indicates an expected call of DeleteSystemSecret.
func (mr *MockGitOpsFluxClientMockRecorder) DeleteSystemSecret(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSystemSecret", reflect.TypeOf((*MockGitOpsFluxClient)(nil).DeleteSystemSecret), arg0, arg1, arg2)
}
// DisableResourceReconcile mocks base method.
func (m *MockGitOpsFluxClient) DisableResourceReconcile(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DisableResourceReconcile", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// DisableResourceReconcile indicates an expected call of DisableResourceReconcile.
func (mr *MockGitOpsFluxClientMockRecorder) DisableResourceReconcile(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableResourceReconcile", reflect.TypeOf((*MockGitOpsFluxClient)(nil).DisableResourceReconcile), arg0, arg1, arg2, arg3, arg4)
}
// EnableResourceReconcile mocks base method.
func (m *MockGitOpsFluxClient) EnableResourceReconcile(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnableResourceReconcile", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// EnableResourceReconcile indicates an expected call of EnableResourceReconcile.
func (mr *MockGitOpsFluxClientMockRecorder) EnableResourceReconcile(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableResourceReconcile", reflect.TypeOf((*MockGitOpsFluxClient)(nil).EnableResourceReconcile), arg0, arg1, arg2, arg3, arg4)
}
// ForceReconcile mocks base method.
func (m *MockGitOpsFluxClient) ForceReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ForceReconcile", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ForceReconcile indicates an expected call of ForceReconcile.
func (mr *MockGitOpsFluxClientMockRecorder) ForceReconcile(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceReconcile", reflect.TypeOf((*MockGitOpsFluxClient)(nil).ForceReconcile), arg0, arg1, arg2)
}
// GetCluster mocks base method.
func (m *MockGitOpsFluxClient) GetCluster(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) (*v1alpha1.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1alpha1.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCluster indicates an expected call of GetCluster.
func (mr *MockGitOpsFluxClientMockRecorder) GetCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCluster", reflect.TypeOf((*MockGitOpsFluxClient)(nil).GetCluster), arg0, arg1, arg2)
}
// Reconcile mocks base method.
func (m *MockGitOpsFluxClient) Reconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockGitOpsFluxClientMockRecorder) Reconcile(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockGitOpsFluxClient)(nil).Reconcile), arg0, arg1, arg2)
}
// Uninstall mocks base method.
func (m *MockGitOpsFluxClient) Uninstall(arg0 context.Context, arg1 *types.Cluster, arg2 *v1alpha1.FluxConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Uninstall", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Uninstall indicates an expected call of Uninstall.
func (mr *MockGitOpsFluxClientMockRecorder) Uninstall(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uninstall", reflect.TypeOf((*MockGitOpsFluxClient)(nil).Uninstall), arg0, arg1, arg2)
}
// MockGitClient is a mock of GitClient interface.
type MockGitClient struct {
ctrl *gomock.Controller
recorder *MockGitClientMockRecorder
}
// MockGitClientMockRecorder is the mock recorder for MockGitClient.
type MockGitClientMockRecorder struct {
mock *MockGitClient
}
// NewMockGitClient creates a new mock instance.
func NewMockGitClient(ctrl *gomock.Controller) *MockGitClient {
mock := &MockGitClient{ctrl: ctrl}
mock.recorder = &MockGitClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGitClient) EXPECT() *MockGitClientMockRecorder {
return m.recorder
}
// Add mocks base method.
func (m *MockGitClient) Add(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Add", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Add indicates an expected call of Add.
func (mr *MockGitClientMockRecorder) Add(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockGitClient)(nil).Add), arg0)
}
// Branch mocks base method.
func (m *MockGitClient) Branch(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Branch", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Branch indicates an expected call of Branch.
func (mr *MockGitClientMockRecorder) Branch(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Branch", reflect.TypeOf((*MockGitClient)(nil).Branch), arg0)
}
// Clone mocks base method.
func (m *MockGitClient) Clone(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Clone", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Clone indicates an expected call of Clone.
func (mr *MockGitClientMockRecorder) Clone(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clone", reflect.TypeOf((*MockGitClient)(nil).Clone), arg0)
}
// Commit mocks base method.
func (m *MockGitClient) Commit(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Commit indicates an expected call of Commit.
func (mr *MockGitClientMockRecorder) Commit(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockGitClient)(nil).Commit), arg0)
}
// CreateRepo mocks base method.
func (m *MockGitClient) CreateRepo(arg0 context.Context, arg1 git.CreateRepoOpts) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRepo", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateRepo indicates an expected call of CreateRepo.
func (mr *MockGitClientMockRecorder) CreateRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRepo", reflect.TypeOf((*MockGitClient)(nil).CreateRepo), arg0, arg1)
}
// GetRepo mocks base method.
func (m *MockGitClient) GetRepo(arg0 context.Context) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRepo", arg0)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetRepo indicates an expected call of GetRepo.
func (mr *MockGitClientMockRecorder) GetRepo(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRepo", reflect.TypeOf((*MockGitClient)(nil).GetRepo), arg0)
}
// Init mocks base method.
func (m *MockGitClient) Init() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init")
ret0, _ := ret[0].(error)
return ret0
}
// Init indicates an expected call of Init.
func (mr *MockGitClientMockRecorder) Init() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockGitClient)(nil).Init))
}
// PathExists mocks base method.
func (m *MockGitClient) PathExists(arg0 context.Context, arg1, arg2, arg3, arg4 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PathExists", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PathExists indicates an expected call of PathExists.
func (mr *MockGitClientMockRecorder) PathExists(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PathExists", reflect.TypeOf((*MockGitClient)(nil).PathExists), arg0, arg1, arg2, arg3, arg4)
}
// Pull mocks base method.
func (m *MockGitClient) Pull(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Pull", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Pull indicates an expected call of Pull.
func (mr *MockGitClientMockRecorder) Pull(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pull", reflect.TypeOf((*MockGitClient)(nil).Pull), arg0, arg1)
}
// Push mocks base method.
func (m *MockGitClient) Push(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Push", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Push indicates an expected call of Push.
func (mr *MockGitClientMockRecorder) Push(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockGitClient)(nil).Push), arg0)
}
// Remove mocks base method.
func (m *MockGitClient) Remove(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Remove", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Remove indicates an expected call of Remove.
func (mr *MockGitClientMockRecorder) Remove(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockGitClient)(nil).Remove), arg0)
}
// MockTemplater is a mock of Templater interface.
type MockTemplater struct {
ctrl *gomock.Controller
recorder *MockTemplaterMockRecorder
}
// MockTemplaterMockRecorder is the mock recorder for MockTemplater.
type MockTemplaterMockRecorder struct {
mock *MockTemplater
}
// NewMockTemplater creates a new mock instance.
func NewMockTemplater(ctrl *gomock.Controller) *MockTemplater {
mock := &MockTemplater{ctrl: ctrl}
mock.recorder = &MockTemplaterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockTemplater) EXPECT() *MockTemplaterMockRecorder {
return m.recorder
}
// WriteToFile mocks base method.
func (m *MockTemplater) WriteToFile(arg0 string, arg1 interface{}, arg2 string, arg3 ...filewriter.FileOptionsFunc) (string, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "WriteToFile", varargs...)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WriteToFile indicates an expected call of WriteToFile.
func (mr *MockTemplaterMockRecorder) WriteToFile(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteToFile", reflect.TypeOf((*MockTemplater)(nil).WriteToFile), varargs...)
}
| 561 |
eks-anywhere | aws | Go | package govmomi
import (
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25"
)
type vMOMIAuthorizationManagerBuilder struct{}
func (*vMOMIAuthorizationManagerBuilder) Build(c *vim25.Client) *object.AuthorizationManager {
return object.NewAuthorizationManager(c)
}
| 13 |
eks-anywhere | aws | Go | package govmomi
import (
"context"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
)
const (
VSphereTypeFolder = "Folder"
VSphereTypeNetwork = "Network"
VSphereTypeResourcePool = "ResourcePool"
VSphereTypeDatastore = "Datastore"
VSphereTypeVirtualMachine = "VirtualMachine"
)
type VMOMIAuthorizationManager interface {
FetchUserPrivilegeOnEntities(ctx context.Context, entities []types.ManagedObjectReference, userName string) ([]types.UserPrivilegeResult, error)
}
type VMOMIFinder interface {
Datastore(ctx context.Context, path string) (*object.Datastore, error)
Folder(ctx context.Context, path string) (*object.Folder, error)
Network(ctx context.Context, path string) (object.NetworkReference, error)
ResourcePool(ctx context.Context, path string) (*object.ResourcePool, error)
VirtualMachine(ctx context.Context, path string) (*object.VirtualMachine, error)
Datacenter(ctx context.Context, path string) (*object.Datacenter, error)
SetDatacenter(dc *object.Datacenter) *find.Finder
}
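// VMOMIClient bundles a govmomi session with a finder and an authorization manager so that privileges on
// vSphere objects can be looked up for a given user.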
type VMOMIClient struct {
Gcvm *govmomi.Client
Finder VMOMIFinder
username string
AuthorizationManager VMOMIAuthorizationManager
}
func NewVMOMIClientCustom(gcvm *govmomi.Client, f VMOMIFinder, username string, am VMOMIAuthorizationManager) *VMOMIClient {
return &VMOMIClient{
Gcvm: gcvm,
Finder: f,
username: username,
AuthorizationManager: am,
}
}
func (vsc *VMOMIClient) Username() string {
return vsc.username
}
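// GetPrivsOnEntity resolves the vSphere object of the given type at the given inventory path and returns the
// names of the privileges the user holds on it. Illustrative call (the path and user are hypothetical):
//
//	privs, err := client.GetPrivsOnEntity(ctx, "Datacenter/vm/Templates/my-template", VSphereTypeVirtualMachine, "admin@vsphere.local")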
func (vsc *VMOMIClient) GetPrivsOnEntity(ctx context.Context, path string, objType string, username string) ([]string, error) {
var vSphereObjectReference types.ManagedObjectReference
emptyResult := []string{}
var err error
switch objType {
case VSphereTypeFolder:
vSphereObjectReference, err = vsc.getFolder(ctx, path)
case VSphereTypeNetwork:
vSphereObjectReference, err = vsc.getNetwork(ctx, path)
case VSphereTypeDatastore:
vSphereObjectReference, err = vsc.getDatastore(ctx, path)
case VSphereTypeResourcePool:
vSphereObjectReference, err = vsc.getResourcePool(ctx, path)
case VSphereTypeVirtualMachine:
vSphereObjectReference, err = vsc.getVirtualMachine(ctx, path)
}
if err != nil {
return emptyResult, err
}
refs := []types.ManagedObjectReference{vSphereObjectReference}
result, err := vsc.AuthorizationManager.FetchUserPrivilegeOnEntities(ctx, refs, username)
if err != nil {
return emptyResult, err
}
	if len(result) > 0 {
		return result[0].Privileges, nil
	}
	return emptyResult, nil
}
func (vsc *VMOMIClient) getFolder(ctx context.Context, path string) (types.ManagedObjectReference, error) {
	obj, err := vsc.Finder.Folder(ctx, path)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	return obj.Common.Reference(), nil
}
func (vsc *VMOMIClient) getNetwork(ctx context.Context, path string) (types.ManagedObjectReference, error) {
	obj, err := vsc.Finder.Network(ctx, path)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	return obj.Reference(), nil
}
func (vsc *VMOMIClient) getDatastore(ctx context.Context, path string) (types.ManagedObjectReference, error) {
	obj, err := vsc.Finder.Datastore(ctx, path)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	return obj.Common.Reference(), nil
}
func (vsc *VMOMIClient) getResourcePool(ctx context.Context, path string) (types.ManagedObjectReference, error) {
	obj, err := vsc.Finder.ResourcePool(ctx, path)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	return obj.Common.Reference(), nil
}
func (vsc *VMOMIClient) getVirtualMachine(ctx context.Context, path string) (types.ManagedObjectReference, error) {
	obj, err := vsc.Finder.VirtualMachine(ctx, path)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	return obj.Common.Reference(), nil
}
| 134 |
eks-anywhere | aws | Go | package govmomi
import (
"context"
"net/url"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/soap"
)
type VSphereClient interface {
Username() string
GetPrivsOnEntity(ctx context.Context, path string, objType string, username string) ([]string, error)
}
type VMOMIFinderBuilder interface {
Build(arg0 *vim25.Client, arg1 ...bool) VMOMIFinder
}
type VMOMISessionBuilder interface {
Build(ctx context.Context, u *url.URL, insecure bool) (*govmomi.Client, error)
}
type VMOMIAuthorizationManagerBuilder interface {
Build(c *vim25.Client) *object.AuthorizationManager
}
type vMOMIClientBuilder struct {
vfb VMOMIFinderBuilder
gcb VMOMISessionBuilder
amb VMOMIAuthorizationManagerBuilder
}
func NewVMOMIClientBuilder() *vMOMIClientBuilder {
return &vMOMIClientBuilder{vfb: &vMOMIFinderBuilder{}, gcb: &vMOMISessionBuilder{}, amb: &vMOMIAuthorizationManagerBuilder{}}
}
func NewVMOMIClientBuilderOverride(vfb VMOMIFinderBuilder, gcb VMOMISessionBuilder, amb VMOMIAuthorizationManagerBuilder) *vMOMIClientBuilder {
return &vMOMIClientBuilder{vfb: vfb, gcb: gcb, amb: amb}
}
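// Build parses the vCenter endpoint, establishes a govmomi session with the given credentials, scopes the
// finder to the requested datacenter and returns a VSphereClient backed by that session.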
func (vcb *vMOMIClientBuilder) Build(ctx context.Context, host string, username string, password string, insecure bool, datacenter string) (VSphereClient, error) {
	u, err := soap.ParseURL(host)
	if err != nil {
		return nil, err
	}
	u.User = url.UserPassword(username, password)
	// Establish the govmomi session for the parsed endpoint.
	gvmc, err := vcb.gcb.Build(ctx, u, insecure)
if err != nil {
return nil, err
}
f := vcb.vfb.Build(gvmc.Client, true)
dc, err := f.Datacenter(ctx, datacenter)
if err != nil {
return nil, err
}
f.SetDatacenter(dc)
am := vcb.amb.Build(gvmc.Client)
return &VMOMIClient{gvmc, f, username, am}, nil
}
| 70 |
eks-anywhere | aws | Go | package govmomi_test
import (
"context"
"fmt"
"reflect"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
govmomi_internal "github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/types"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/govmomi/mocks"
)
type fields struct {
AuthorizationManager *mocks.MockVMOMIAuthorizationManager
Finder *mocks.MockVMOMIFinder
Path string
}
func TestGetPrivsOnEntity(t *testing.T) {
ctx := context.Background()
username := "foobar"
wantPrivs := []string{"DoManyThings", "DoFewThings"}
results := []types.UserPrivilegeResult{
{
Privileges: wantPrivs,
},
}
errMsg := "No entity found"
tests := []struct {
name string
objType string
path string
// prepare lets us initialize our mocks within the `tests` slice. Oftentimes it also proves useful for other initialization
prepare func(f *fields)
wantPrivs []string
wantErr string
}{
{
name: "test folder call happy path",
objType: govmomi.VSphereTypeFolder,
path: "Datacenter/vm/my/directory",
wantPrivs: wantPrivs,
wantErr: "",
prepare: func(f *fields) {
obj := object.Folder{}
objRefs := []types.ManagedObjectReference{obj.Common.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(results, nil)
f.Finder.EXPECT().Folder(ctx, f.Path).Return(&obj, nil)
},
},
{
name: "test datastore call happy path",
objType: govmomi.VSphereTypeDatastore,
path: "Datacenter/datastore/LargeDatastore1",
wantPrivs: wantPrivs,
wantErr: "",
prepare: func(f *fields) {
obj := object.Datastore{}
objRefs := []types.ManagedObjectReference{obj.Common.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(results, nil)
f.Finder.EXPECT().Datastore(ctx, f.Path).Return(&obj, nil)
},
},
{
name: "test resource pool call happy path",
objType: govmomi.VSphereTypeResourcePool,
path: "Datacenter/host/cluster-02/MyResourcePool",
wantPrivs: wantPrivs,
wantErr: "",
prepare: func(f *fields) {
obj := object.ResourcePool{}
objRefs := []types.ManagedObjectReference{obj.Common.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(results, nil)
f.Finder.EXPECT().ResourcePool(ctx, f.Path).Return(&obj, nil)
},
},
{
name: "test virtual machine call happy path",
objType: govmomi.VSphereTypeVirtualMachine,
path: "Datacenter/vm/Templates/MyVMTemplate",
wantPrivs: wantPrivs,
wantErr: "",
prepare: func(f *fields) {
obj := object.VirtualMachine{}
objRefs := []types.ManagedObjectReference{obj.Common.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(results, nil)
f.Finder.EXPECT().VirtualMachine(ctx, f.Path).Return(&obj, nil)
},
},
{
name: "test network call happy path",
objType: govmomi.VSphereTypeNetwork,
path: "Datacenter/network/VM Network",
wantPrivs: wantPrivs,
wantErr: "",
prepare: func(f *fields) {
obj := object.Network{}
objRefs := []types.ManagedObjectReference{obj.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(results, nil)
f.Finder.EXPECT().Network(ctx, f.Path).Return(&obj, nil)
},
},
{
name: "test network call missing object",
objType: govmomi.VSphereTypeNetwork,
path: "Datacenter/network/VM Network",
wantPrivs: []string{},
wantErr: errMsg,
prepare: func(f *fields) {
f.Finder.EXPECT().Network(ctx, f.Path).Return(nil, fmt.Errorf(errMsg))
},
},
{
name: "test virtual machine call no privs",
objType: govmomi.VSphereTypeVirtualMachine,
path: "Datacenter/vm/Templates/MyVMTemplate",
wantPrivs: []string{},
wantErr: errMsg,
prepare: func(f *fields) {
obj := object.VirtualMachine{}
objRefs := []types.ManagedObjectReference{obj.Common.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(nil, fmt.Errorf(errMsg))
f.Finder.EXPECT().VirtualMachine(ctx, f.Path).Return(&obj, nil)
},
},
{
name: "test resource pool call missing object",
objType: govmomi.VSphereTypeResourcePool,
path: "Datacenter/host/cluster-02/MyResourcePool",
wantPrivs: []string{},
wantErr: errMsg,
prepare: func(f *fields) {
f.Finder.EXPECT().ResourcePool(ctx, f.Path).Return(nil, fmt.Errorf(errMsg))
},
},
{
name: "test folder call empty object results",
objType: govmomi.VSphereTypeFolder,
path: "Datacenter/vm/my/directory",
wantPrivs: []string{},
wantErr: "",
prepare: func(f *fields) {
obj := object.Folder{}
objRefs := []types.ManagedObjectReference{obj.Common.Reference()}
f.AuthorizationManager.EXPECT().FetchUserPrivilegeOnEntities(ctx, objRefs, username).Return(nil, nil)
f.Finder.EXPECT().Folder(ctx, f.Path).Return(&obj, nil)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
am := mocks.NewMockVMOMIAuthorizationManager(ctrl)
finder := mocks.NewMockVMOMIFinder(ctrl)
f := &fields{
AuthorizationManager: am,
Finder: finder,
Path: tt.path,
}
tt.prepare(f)
g := NewWithT(t)
vsc := govmomi.NewVMOMIClientCustom(nil, finder, username, am)
privs, err := vsc.GetPrivsOnEntity(ctx, tt.path, tt.objType, username)
if tt.wantErr == "" {
g.Expect(err).To(Succeed())
if !reflect.DeepEqual(privs, tt.wantPrivs) {
t.Fatalf("privs = %v, want %v", privs, wantPrivs)
}
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
		})
}
}
func TestVMOMISessionBuilderBuild(t *testing.T) {
insecure := false
datacenter := "mydatacenter"
datacenterObject := object.Datacenter{}
ctx := context.Background()
ctrl := gomock.NewController(t)
gcb := mocks.NewMockVMOMISessionBuilder(ctrl)
c := &govmomi_internal.Client{
Client: &vim25.Client{},
}
gcb.EXPECT().Build(ctx, gomock.Any(), insecure).Return(c, nil)
mockFinder := mocks.NewMockVMOMIFinder(ctrl)
mockFinder.EXPECT().Datacenter(ctx, datacenter).Return(&datacenterObject, nil)
mockFinder.EXPECT().SetDatacenter(gomock.Any())
vfb := mocks.NewMockVMOMIFinderBuilder(ctrl)
vfb.EXPECT().Build(c.Client, true).Return(mockFinder)
amb := mocks.NewMockVMOMIAuthorizationManagerBuilder(ctrl)
amb.EXPECT().Build(c.Client)
vcb := govmomi.NewVMOMIClientBuilderOverride(vfb, gcb, amb)
_, err := vcb.Build(ctx, "myhost", "myusername", "mypassword", insecure, datacenter)
if err != nil {
t.Fatalf("Failed to build client with %s", err)
}
}
| 220 |
eks-anywhere | aws | Go | package govmomi
import (
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/vim25"
)
type vMOMIFinderBuilder struct{}
func NewVMOMIFinderBuilder() *vMOMIFinderBuilder {
return &vMOMIFinderBuilder{}
}
func (*vMOMIFinderBuilder) Build(client *vim25.Client, all ...bool) VMOMIFinder {
return find.NewFinder(client, all...)
}
| 17 |
eks-anywhere | aws | Go | package govmomi
import (
"context"
"net/url"
"github.com/vmware/govmomi"
)
type vMOMISessionBuilder struct{}
func NewvMOMISessionBuilder() *vMOMISessionBuilder {
	return &vMOMISessionBuilder{}
}
func (*vMOMISessionBuilder) Build(ctx context.Context, u *url.URL, insecure bool) (*govmomi.Client, error) {
return govmomi.NewClient(ctx, u, insecure)
}
| 19 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/govmomi (interfaces: VSphereClient,VMOMIAuthorizationManager,VMOMIFinder,VMOMISessionBuilder,VMOMIFinderBuilder,VMOMIAuthorizationManagerBuilder)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
url "net/url"
reflect "reflect"
govmomi "github.com/aws/eks-anywhere/pkg/govmomi"
gomock "github.com/golang/mock/gomock"
govmomi0 "github.com/vmware/govmomi"
find "github.com/vmware/govmomi/find"
object "github.com/vmware/govmomi/object"
vim25 "github.com/vmware/govmomi/vim25"
types "github.com/vmware/govmomi/vim25/types"
)
// MockVSphereClient is a mock of VSphereClient interface.
type MockVSphereClient struct {
ctrl *gomock.Controller
recorder *MockVSphereClientMockRecorder
}
// MockVSphereClientMockRecorder is the mock recorder for MockVSphereClient.
type MockVSphereClientMockRecorder struct {
mock *MockVSphereClient
}
// NewMockVSphereClient creates a new mock instance.
func NewMockVSphereClient(ctrl *gomock.Controller) *MockVSphereClient {
mock := &MockVSphereClient{ctrl: ctrl}
mock.recorder = &MockVSphereClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVSphereClient) EXPECT() *MockVSphereClientMockRecorder {
return m.recorder
}
// GetPrivsOnEntity mocks base method.
func (m *MockVSphereClient) GetPrivsOnEntity(arg0 context.Context, arg1, arg2, arg3 string) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPrivsOnEntity", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPrivsOnEntity indicates an expected call of GetPrivsOnEntity.
func (mr *MockVSphereClientMockRecorder) GetPrivsOnEntity(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivsOnEntity", reflect.TypeOf((*MockVSphereClient)(nil).GetPrivsOnEntity), arg0, arg1, arg2, arg3)
}
// Username mocks base method.
func (m *MockVSphereClient) Username() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Username")
ret0, _ := ret[0].(string)
return ret0
}
// Username indicates an expected call of Username.
func (mr *MockVSphereClientMockRecorder) Username() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Username", reflect.TypeOf((*MockVSphereClient)(nil).Username))
}
// MockVMOMIAuthorizationManager is a mock of VMOMIAuthorizationManager interface.
type MockVMOMIAuthorizationManager struct {
ctrl *gomock.Controller
recorder *MockVMOMIAuthorizationManagerMockRecorder
}
// MockVMOMIAuthorizationManagerMockRecorder is the mock recorder for MockVMOMIAuthorizationManager.
type MockVMOMIAuthorizationManagerMockRecorder struct {
mock *MockVMOMIAuthorizationManager
}
// NewMockVMOMIAuthorizationManager creates a new mock instance.
func NewMockVMOMIAuthorizationManager(ctrl *gomock.Controller) *MockVMOMIAuthorizationManager {
mock := &MockVMOMIAuthorizationManager{ctrl: ctrl}
mock.recorder = &MockVMOMIAuthorizationManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVMOMIAuthorizationManager) EXPECT() *MockVMOMIAuthorizationManagerMockRecorder {
return m.recorder
}
// FetchUserPrivilegeOnEntities mocks base method.
func (m *MockVMOMIAuthorizationManager) FetchUserPrivilegeOnEntities(arg0 context.Context, arg1 []types.ManagedObjectReference, arg2 string) ([]types.UserPrivilegeResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchUserPrivilegeOnEntities", arg0, arg1, arg2)
ret0, _ := ret[0].([]types.UserPrivilegeResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FetchUserPrivilegeOnEntities indicates an expected call of FetchUserPrivilegeOnEntities.
func (mr *MockVMOMIAuthorizationManagerMockRecorder) FetchUserPrivilegeOnEntities(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUserPrivilegeOnEntities", reflect.TypeOf((*MockVMOMIAuthorizationManager)(nil).FetchUserPrivilegeOnEntities), arg0, arg1, arg2)
}
// MockVMOMIFinder is a mock of VMOMIFinder interface.
type MockVMOMIFinder struct {
ctrl *gomock.Controller
recorder *MockVMOMIFinderMockRecorder
}
// MockVMOMIFinderMockRecorder is the mock recorder for MockVMOMIFinder.
type MockVMOMIFinderMockRecorder struct {
mock *MockVMOMIFinder
}
// NewMockVMOMIFinder creates a new mock instance.
func NewMockVMOMIFinder(ctrl *gomock.Controller) *MockVMOMIFinder {
mock := &MockVMOMIFinder{ctrl: ctrl}
mock.recorder = &MockVMOMIFinderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVMOMIFinder) EXPECT() *MockVMOMIFinderMockRecorder {
return m.recorder
}
// Datacenter mocks base method.
func (m *MockVMOMIFinder) Datacenter(arg0 context.Context, arg1 string) (*object.Datacenter, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Datacenter", arg0, arg1)
ret0, _ := ret[0].(*object.Datacenter)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Datacenter indicates an expected call of Datacenter.
func (mr *MockVMOMIFinderMockRecorder) Datacenter(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Datacenter", reflect.TypeOf((*MockVMOMIFinder)(nil).Datacenter), arg0, arg1)
}
// Datastore mocks base method.
func (m *MockVMOMIFinder) Datastore(arg0 context.Context, arg1 string) (*object.Datastore, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Datastore", arg0, arg1)
ret0, _ := ret[0].(*object.Datastore)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Datastore indicates an expected call of Datastore.
func (mr *MockVMOMIFinderMockRecorder) Datastore(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Datastore", reflect.TypeOf((*MockVMOMIFinder)(nil).Datastore), arg0, arg1)
}
// Folder mocks base method.
func (m *MockVMOMIFinder) Folder(arg0 context.Context, arg1 string) (*object.Folder, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Folder", arg0, arg1)
ret0, _ := ret[0].(*object.Folder)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Folder indicates an expected call of Folder.
func (mr *MockVMOMIFinderMockRecorder) Folder(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Folder", reflect.TypeOf((*MockVMOMIFinder)(nil).Folder), arg0, arg1)
}
// Network mocks base method.
func (m *MockVMOMIFinder) Network(arg0 context.Context, arg1 string) (object.NetworkReference, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Network", arg0, arg1)
ret0, _ := ret[0].(object.NetworkReference)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Network indicates an expected call of Network.
func (mr *MockVMOMIFinderMockRecorder) Network(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Network", reflect.TypeOf((*MockVMOMIFinder)(nil).Network), arg0, arg1)
}
// ResourcePool mocks base method.
func (m *MockVMOMIFinder) ResourcePool(arg0 context.Context, arg1 string) (*object.ResourcePool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResourcePool", arg0, arg1)
ret0, _ := ret[0].(*object.ResourcePool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ResourcePool indicates an expected call of ResourcePool.
func (mr *MockVMOMIFinderMockRecorder) ResourcePool(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourcePool", reflect.TypeOf((*MockVMOMIFinder)(nil).ResourcePool), arg0, arg1)
}
// SetDatacenter mocks base method.
func (m *MockVMOMIFinder) SetDatacenter(arg0 *object.Datacenter) *find.Finder {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetDatacenter", arg0)
ret0, _ := ret[0].(*find.Finder)
return ret0
}
// SetDatacenter indicates an expected call of SetDatacenter.
func (mr *MockVMOMIFinderMockRecorder) SetDatacenter(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDatacenter", reflect.TypeOf((*MockVMOMIFinder)(nil).SetDatacenter), arg0)
}
// VirtualMachine mocks base method.
func (m *MockVMOMIFinder) VirtualMachine(arg0 context.Context, arg1 string) (*object.VirtualMachine, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*object.VirtualMachine)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VirtualMachine indicates an expected call of VirtualMachine.
func (mr *MockVMOMIFinderMockRecorder) VirtualMachine(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VirtualMachine", reflect.TypeOf((*MockVMOMIFinder)(nil).VirtualMachine), arg0, arg1)
}
// MockVMOMISessionBuilder is a mock of VMOMISessionBuilder interface.
type MockVMOMISessionBuilder struct {
ctrl *gomock.Controller
recorder *MockVMOMISessionBuilderMockRecorder
}
// MockVMOMISessionBuilderMockRecorder is the mock recorder for MockVMOMISessionBuilder.
type MockVMOMISessionBuilderMockRecorder struct {
mock *MockVMOMISessionBuilder
}
// NewMockVMOMISessionBuilder creates a new mock instance.
func NewMockVMOMISessionBuilder(ctrl *gomock.Controller) *MockVMOMISessionBuilder {
mock := &MockVMOMISessionBuilder{ctrl: ctrl}
mock.recorder = &MockVMOMISessionBuilderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVMOMISessionBuilder) EXPECT() *MockVMOMISessionBuilderMockRecorder {
return m.recorder
}
// Build mocks base method.
func (m *MockVMOMISessionBuilder) Build(arg0 context.Context, arg1 *url.URL, arg2 bool) (*govmomi0.Client, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Build", arg0, arg1, arg2)
ret0, _ := ret[0].(*govmomi0.Client)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Build indicates an expected call of Build.
func (mr *MockVMOMISessionBuilderMockRecorder) Build(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Build", reflect.TypeOf((*MockVMOMISessionBuilder)(nil).Build), arg0, arg1, arg2)
}
// MockVMOMIFinderBuilder is a mock of VMOMIFinderBuilder interface.
type MockVMOMIFinderBuilder struct {
ctrl *gomock.Controller
recorder *MockVMOMIFinderBuilderMockRecorder
}
// MockVMOMIFinderBuilderMockRecorder is the mock recorder for MockVMOMIFinderBuilder.
type MockVMOMIFinderBuilderMockRecorder struct {
mock *MockVMOMIFinderBuilder
}
// NewMockVMOMIFinderBuilder creates a new mock instance.
func NewMockVMOMIFinderBuilder(ctrl *gomock.Controller) *MockVMOMIFinderBuilder {
mock := &MockVMOMIFinderBuilder{ctrl: ctrl}
mock.recorder = &MockVMOMIFinderBuilderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVMOMIFinderBuilder) EXPECT() *MockVMOMIFinderBuilderMockRecorder {
return m.recorder
}
// Build mocks base method.
func (m *MockVMOMIFinderBuilder) Build(arg0 *vim25.Client, arg1 ...bool) govmomi.VMOMIFinder {
m.ctrl.T.Helper()
varargs := []interface{}{arg0}
for _, a := range arg1 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Build", varargs...)
ret0, _ := ret[0].(govmomi.VMOMIFinder)
return ret0
}
// Build indicates an expected call of Build.
func (mr *MockVMOMIFinderBuilderMockRecorder) Build(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0}, arg1...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Build", reflect.TypeOf((*MockVMOMIFinderBuilder)(nil).Build), varargs...)
}
// MockVMOMIAuthorizationManagerBuilder is a mock of VMOMIAuthorizationManagerBuilder interface.
type MockVMOMIAuthorizationManagerBuilder struct {
ctrl *gomock.Controller
recorder *MockVMOMIAuthorizationManagerBuilderMockRecorder
}
// MockVMOMIAuthorizationManagerBuilderMockRecorder is the mock recorder for MockVMOMIAuthorizationManagerBuilder.
type MockVMOMIAuthorizationManagerBuilderMockRecorder struct {
mock *MockVMOMIAuthorizationManagerBuilder
}
// NewMockVMOMIAuthorizationManagerBuilder creates a new mock instance.
func NewMockVMOMIAuthorizationManagerBuilder(ctrl *gomock.Controller) *MockVMOMIAuthorizationManagerBuilder {
mock := &MockVMOMIAuthorizationManagerBuilder{ctrl: ctrl}
mock.recorder = &MockVMOMIAuthorizationManagerBuilderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVMOMIAuthorizationManagerBuilder) EXPECT() *MockVMOMIAuthorizationManagerBuilderMockRecorder {
return m.recorder
}
// Build mocks base method.
func (m *MockVMOMIAuthorizationManagerBuilder) Build(arg0 *vim25.Client) *object.AuthorizationManager {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Build", arg0)
ret0, _ := ret[0].(*object.AuthorizationManager)
return ret0
}
// Build indicates an expected call of Build.
func (mr *MockVMOMIAuthorizationManagerBuilderMockRecorder) Build(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Build", reflect.TypeOf((*MockVMOMIAuthorizationManagerBuilder)(nil).Build), arg0)
}
| 354 |
eks-anywhere | aws | Go | package helm
import (
"context"
"fmt"
"sort"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/oci"
)
type Client interface {
RegistryLogin(ctx context.Context, registry, username, password string) error
PushChart(ctx context.Context, chart, registry string) error
SaveChart(ctx context.Context, ociURI, version, folder string) error
}
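// ChartRegistryDownloader saves helm charts from an OCI registry to a local folder on disk.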
type ChartRegistryDownloader struct {
client Client
dstFolder string
}
func NewChartRegistryDownloader(client Client, dstFolder string) *ChartRegistryDownloader {
return &ChartRegistryDownloader{
client: client,
dstFolder: dstFolder,
}
}
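// Download saves each unique chart in the list to the downloader's destination folder. Chart references are
// given as "host/repository:version", e.g. the illustrative "ecr.com/chart1:v1.1.0".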
func (d *ChartRegistryDownloader) Download(ctx context.Context, charts ...string) error {
for _, chart := range uniqueCharts(charts) {
chartURL, chartVersion := oci.ChartURLAndVersion(chart)
logger.Info("Saving helm chart to disk", "chart", chart)
if err := d.client.SaveChart(ctx, chartURL, chartVersion, d.dstFolder); err != nil {
return fmt.Errorf("downloading chart [%s] from registry: %v", chart, err)
}
}
return nil
}
func uniqueCharts(charts []string) []string {
c := types.SliceToLookup(charts).ToSlice()
// TODO: maybe optimize this, avoiding the sort and just following the same order as the original slice
sort.Strings(c)
return c
}
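// Illustrative usage sketch (not part of the original file): downloading a set of
// charts to a local cache folder. helmClient (a concrete Client implementation) and
// ctx (an existing context.Context) are hypothetical:
//
//	downloader := NewChartRegistryDownloader(helmClient, "charts-cache")
//	if err := downloader.Download(ctx, "ecr.com/chart1:v1.1.0", "ecr.com/chart2:v2.2.0"); err != nil {
//		return fmt.Errorf("caching charts: %v", err)
//	}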
| 49 |
eks-anywhere | aws | Go | package helm_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/helm"
"github.com/aws/eks-anywhere/pkg/helm/mocks"
)
func TestChartRegistryDownloaderDownload(t *testing.T) {
g := NewWithT(t)
charts := []string{"ecr.com/chart1:v1.1.0", "ecr.com/chart2:v2.2.0", "ecr.com/chart1:v1.1.0"}
ctx := context.Background()
folder := "folder"
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
client.EXPECT().SaveChart(ctx, "oci://ecr.com/chart1", "v1.1.0", folder)
client.EXPECT().SaveChart(ctx, "oci://ecr.com/chart2", "v2.2.0", folder)
d := helm.NewChartRegistryDownloader(client, folder)
g.Expect(d.Download(ctx, charts...)).To(Succeed())
}
func TestChartRegistryDownloaderDownloadError(t *testing.T) {
g := NewWithT(t)
charts := []string{"ecr.com/chart1:v1.1.0", "ecr.com/chart2:v2.2.0", "ecr.com/chart1:v1.1.0"}
ctx := context.Background()
folder := "folder"
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
client.EXPECT().SaveChart(ctx, "oci://ecr.com/chart1", "v1.1.0", folder)
client.EXPECT().SaveChart(ctx, "oci://ecr.com/chart2", "v2.2.0", folder).Return(errors.New("failed downloading"))
d := helm.NewChartRegistryDownloader(client, folder)
g.Expect(d.Download(ctx, charts...)).To(MatchError(ContainSubstring("downloading chart [ecr.com/chart2:v2.2.0] from registry: failed downloading")))
}
| 42 |
eks-anywhere | aws | Go | package helm
import (
"path/filepath"
"strings"
)
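// ChartFileName returns the file name a packaged chart is expected to have on disk:
// the base name of the chart URI with the version separator ":" replaced by "-", plus
// a ".tgz" extension. For example, "ecr.com/folder/chart-name:1.0.0" becomes
// "chart-name-1.0.0.tgz".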
func ChartFileName(chart string) string {
return strings.Replace(filepath.Base(chart), ":", "-", 1) + ".tgz"
}
| 11 |
eks-anywhere | aws | Go | package helm_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/helm"
)
func TestChartFileName(t *testing.T) {
tests := []struct {
name string
chart string
want string
}{
{
name: "full path",
chart: "ecr.com/folder/folder2/chart-name:1.0.0",
want: "chart-name-1.0.0.tgz",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(helm.ChartFileName(tt.chart)).To(Equal(tt.want))
})
}
}
| 30 |
eks-anywhere | aws | Go | package helm
import (
"context"
"fmt"
"path/filepath"
"github.com/aws/eks-anywhere/pkg/utils/oci"
"github.com/aws/eks-anywhere/pkg/utils/urls"
)
type ChartRegistryImporter struct {
client Client
registry string
username, password string
srcFolder string
}
func NewChartRegistryImporter(client Client, srcFolder, registry, username, password string) *ChartRegistryImporter {
return &ChartRegistryImporter{
client: client,
srcFolder: srcFolder,
registry: registry,
username: username,
password: password,
}
}
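// Import logs in to the destination registry and pushes each unique chart archive
// from the source folder, rewriting the chart push URL so its host points at the
// destination registry.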
func (i *ChartRegistryImporter) Import(ctx context.Context, charts ...string) error {
if err := i.client.RegistryLogin(ctx, i.registry, i.username, i.password); err != nil {
return fmt.Errorf("importing charts: %v", err)
}
for _, chart := range uniqueCharts(charts) {
pushChartURL := oci.ChartPushURL(chart)
pushChartURL = urls.ReplaceHost(pushChartURL, i.registry)
chartFilepath := filepath.Join(i.srcFolder, ChartFileName(chart))
if err := i.client.PushChart(ctx, chartFilepath, pushChartURL); err != nil {
return fmt.Errorf("pushing chart [%s] to registry [%s]: %v", chart, i.registry, err)
}
}
return nil
}
| 46 |
eks-anywhere | aws | Go | package helm_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/helm"
"github.com/aws/eks-anywhere/pkg/helm/mocks"
)
func TestChartRegistryImporterImport(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
user := "u"
password := "pass"
registry := "registry.com:443"
srcFolder := "folder"
charts := []string{"ecr.com/project/chart1:v1.1.0", "ecr.com/project/chart2:v2.2.0", "ecr.com/project/chart1:v1.1.0"}
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
client.EXPECT().RegistryLogin(ctx, registry, user, password)
client.EXPECT().PushChart(ctx, "folder/chart1-v1.1.0.tgz", "oci://registry.com:443/project")
client.EXPECT().PushChart(ctx, "folder/chart2-v2.2.0.tgz", "oci://registry.com:443/project")
i := helm.NewChartRegistryImporter(client, srcFolder, registry, user, password)
g.Expect(i.Import(ctx, charts...)).To(Succeed())
}
func TestChartRegistryImporterImportLoginError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
user := "u"
password := "pass"
registry := "registry.com:443"
srcFolder := "folder"
charts := []string{"ecr.com/project/chart1:v1.1.0", "ecr.com/project/chart2:v2.2.0", "ecr.com/project/chart1:v1.1.0"}
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
client.EXPECT().RegistryLogin(ctx, registry, user, password).Return(errors.New("logging error"))
i := helm.NewChartRegistryImporter(client, srcFolder, registry, user, password)
g.Expect(i.Import(ctx, charts...)).To(MatchError(ContainSubstring("importing charts: logging error")))
}
func TestChartRegistryImporterImportPushError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
user := "u"
password := "pass"
registry := "registry.com:443"
srcFolder := "folder"
charts := []string{"ecr.com/project/chart1:v1.1.0", "ecr.com/project/chart2:v2.2.0", "ecr.com/project/chart1:v1.1.0"}
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
client.EXPECT().RegistryLogin(ctx, registry, user, password)
client.EXPECT().PushChart(ctx, "folder/chart1-v1.1.0.tgz", "oci://registry.com:443/project").Return(errors.New("pushing error"))
i := helm.NewChartRegistryImporter(client, srcFolder, registry, user, password)
g.Expect(i.Import(ctx, charts...)).To(MatchError(ContainSubstring("pushing chart [ecr.com/project/chart1:v1.1.0] to registry [registry.com:443]: pushing error")))
}
| 68 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/helm/download.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// PushChart mocks base method.
func (m *MockClient) PushChart(ctx context.Context, chart, registry string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushChart", ctx, chart, registry)
ret0, _ := ret[0].(error)
return ret0
}
// PushChart indicates an expected call of PushChart.
func (mr *MockClientMockRecorder) PushChart(ctx, chart, registry interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushChart", reflect.TypeOf((*MockClient)(nil).PushChart), ctx, chart, registry)
}
// RegistryLogin mocks base method.
func (m *MockClient) RegistryLogin(ctx context.Context, registry, username, password string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegistryLogin", ctx, registry, username, password)
ret0, _ := ret[0].(error)
return ret0
}
// RegistryLogin indicates an expected call of RegistryLogin.
func (mr *MockClientMockRecorder) RegistryLogin(ctx, registry, username, password interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryLogin", reflect.TypeOf((*MockClient)(nil).RegistryLogin), ctx, registry, username, password)
}
// SaveChart mocks base method.
func (m *MockClient) SaveChart(ctx context.Context, ociURI, version, folder string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveChart", ctx, ociURI, version, folder)
ret0, _ := ret[0].(error)
return ret0
}
// SaveChart indicates an expected call of SaveChart.
func (mr *MockClientMockRecorder) SaveChart(ctx, ociURI, version, folder interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveChart", reflect.TypeOf((*MockClient)(nil).SaveChart), ctx, ociURI, version, folder)
}
| 78 |
eks-anywhere | aws | Go | package kubeconfig
import "fmt"
// FormatWorkloadClusterKubeconfigFilename returns a filename for the Kubeconfig of workload
// clusters. The filename does not include a basepath.
func FormatWorkloadClusterKubeconfigFilename(clusterName string) string {
return fmt.Sprintf("%s-eks-a-cluster.kubeconfig", clusterName)
}
| 10 |
eks-anywhere | aws | Go | package kubeconfig
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"k8s.io/client-go/tools/clientcmd"
"github.com/aws/eks-anywhere/pkg/validations"
)
// FromClusterFormat defines the filename format of the kubeconfig file generated for EKS-A clusters.
const FromClusterFormat = "%s-eks-a-cluster.kubeconfig"
// EnvName is the standard KubeConfig environment variable name.
// https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable
const EnvName = "KUBECONFIG"
// FromClusterName formats an expected Kubeconfig path for EKS-A clusters. This includes a subdirectory
// named after the cluster name. For example, if the clusterName is 'sandbox' the generated path would be
// sandbox/sandbox-eks-a-cluster.kubeconfig.
func FromClusterName(clusterName string) string {
return filepath.Join(clusterName, fmt.Sprintf(FromClusterFormat, clusterName))
}
// FromEnvironment returns the first kubeconfig file specified in the
// KUBECONFIG environment variable.
//
// The environment variable can contain a list of files, much like how the
// PATH environment variable contains a list of directories.
func FromEnvironment() string {
trimmed := strings.TrimSpace(os.Getenv(EnvName))
for _, filename := range filepath.SplitList(trimmed) {
return filename
}
return ""
}
// ResolveFilename returns a path to a kubeconfig file by priority.
//
// The priority is:
//
// 1. CLI flag (flagValue)
// 2. A file created at cluster creation, found by a combining the cluster
// name with present working directory.
// 3. The first filename found in the KUBECONFIG environment variable.
//
// NO VALIDATION IS PERFORMED. See ValidateFile for validation.
//
// There are other places one may wish to consult or load a kubeconfig file
// from, but this function walks the narrow line between what the kubernetes
// client code does (#1, #3, and some other behaviors we more or less don't
// support) and the existing EKS-A CLI tools that look for kubeconfig files,
// created at cluster creation time, relative to the working directory. These
// behaviors don't always mesh and aren't always compatible, but this function
// combines them as much as possible without breaking either.
func ResolveFilename(flagValue, clusterName string) string {
if flagValue != "" {
return flagValue
}
if clusterName != "" {
return FromClusterName(clusterName)
}
return FromEnvironment()
}
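// Illustrative sketch (not part of the original file) of the resolution priority,
// assuming KUBECONFIG=/tmp/env.kubeconfig is set in the environment:
//
//	ResolveFilename("my.kubeconfig", "prod") // "my.kubeconfig" (flag wins)
//	ResolveFilename("", "prod")              // "prod/prod-eks-a-cluster.kubeconfig"
//	ResolveFilename("", "")                  // "/tmp/env.kubeconfig"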
// ResolveAndValidateFilename composes ResolveFilename and ValidateFilename.
//
// Literally, that's all it does. The two are frequently called together, so
// this helper combines them.
func ResolveAndValidateFilename(flagValue, clusterName string) (string, error) {
filename := ResolveFilename(flagValue, clusterName)
if err := ValidateFilename(filename); err != nil {
return "", err
}
return filename, nil
}
// ValidateFilename loads a file to validate its basic contents.
//
// The values of the fields within aren't validated, but the file's existence
// and basic structure are checked.
func ValidateFilename(filename string) error {
wrapError := func(err error) error {
return fmt.Errorf("validating kubeconfig %q: %w", filename, err)
}
// Trim whitespace from the beginning and end of the filename. While these
// could technically be valid filenames, it's far more likely a typo or
// shell-parsing bug.
trimmed := strings.TrimSpace(filename)
if !validations.FileExists(trimmed) {
return wrapError(fs.ErrNotExist)
}
if !validations.FileExistsAndIsNotEmpty(trimmed) {
return wrapError(fmt.Errorf("is empty"))
}
if _, err := clientcmd.LoadFromFile(trimmed); err != nil {
return wrapError(err)
}
return nil
}
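// ValidateKubeconfigPath returns an error if a non-empty kubeconfig file already
// exists for clusterName under the given parent folders, to avoid clobbering the
// config of an existing cluster with the same name.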
func ValidateKubeconfigPath(clusterName string, parentFolders ...string) error {
kubeconfigPath := FromClusterName(clusterName)
for _, folder := range parentFolders {
kubeconfigPath = filepath.Join(folder, kubeconfigPath)
}
info, err := os.Stat(kubeconfigPath)
if err == nil && info.Size() > 0 {
return fmt.Errorf(
"old cluster config file exists under %s, please use a different clusterName to proceed",
clusterName,
)
}
return nil
}
| 128 |
eks-anywhere | aws | Go | package kubeconfig_test
import (
"bytes"
"io/fs"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
var kindTypo = []byte(`apiVersion: v1\nkind: Conf`)
var goodKubeconfig = []byte(`
apiVersion: v1
clusters:
- cluster:
insecure-skip-tls-verify: true
server: https://127.0.0.1:38471
name: test
contexts:
- context:
cluster: test
user: test-admin
name: test-admin@test
current-context: test-admin@test
kind: Config
preferences: {}
users:
- name: test-admin
user:
client-certificate-data: test
client-key-data: test
`)
func TestFromEnvironment(t *testing.T) {
t.Run("returns the filename from the env var", func(t *testing.T) {
expected := "file one"
t.Setenv("KUBECONFIG", expected)
got := kubeconfig.FromEnvironment()
if got != expected {
t.Fatalf("expected %q, got %q", expected, got)
}
})
t.Run("works with longer paths", func(t *testing.T) {
expected := "/home/user/some/long/path/or file/directory structure/config"
t.Setenv("KUBECONFIG", expected)
got := kubeconfig.FromEnvironment()
if got != expected {
t.Fatalf("expected %q, got %q", expected, got)
}
})
t.Run("returns the first file in a list", func(t *testing.T) {
expected := "file one"
t.Setenv("KUBECONFIG", expected+":filetwo")
got := kubeconfig.FromEnvironment()
if got != expected {
t.Fatalf("expected %q, got %q", expected, got)
}
})
t.Run("returns an empty string if no files are found", func(t *testing.T) {
expected := ""
t.Setenv("KUBECONFIG", "")
got := kubeconfig.FromEnvironment()
if got != expected {
t.Fatalf("expected %q, got %q", expected, got)
}
})
t.Run("trims whitespace, so as not to return 'empty' filenames", func(t *testing.T) {
expected := ""
t.Setenv("KUBECONFIG", " ")
got := kubeconfig.FromEnvironment()
if got != expected {
t.Fatalf("expected %q, got %q", expected, got)
}
expected = ""
t.Setenv("KUBECONFIG", "\t")
got = kubeconfig.FromEnvironment()
if got != expected {
t.Fatalf("expected %q, got %q", expected, got)
}
})
}
func TestValidateFilename(t *testing.T) {
t.Run("reports errors from validator", func(t *testing.T) {
badFile := test.WithFakeFileContents(t, bytes.NewReader(kindTypo))
assert.Error(t, kubeconfig.ValidateFilename(badFile.Name()))
})
t.Run("reports errors for files that are empty", func(t *testing.T) {
emptyFile := test.WithFakeFileContents(t, bytes.NewReader([]byte("")))
assert.Error(t, kubeconfig.ValidateFilename(emptyFile.Name()))
})
t.Run("reports errors for files that don't exist", func(t *testing.T) {
doesntExist := filepath.Join(t.TempDir(), "does-not-exist")
test.RemoveFileIfExists(t, doesntExist)
assert.ErrorIs(t, kubeconfig.ValidateFilename(doesntExist), fs.ErrNotExist)
})
t.Run("returns nil when valid", func(t *testing.T) {
goodFile := test.WithFakeFileContents(t, bytes.NewReader(goodKubeconfig))
assert.NoError(t, kubeconfig.ValidateFilename(goodFile.Name()))
})
t.Run("trims whitespace around a filename", func(t *testing.T) {
goodFile := test.WithFakeFileContents(t, bytes.NewReader(goodKubeconfig))
assert.NoError(t, kubeconfig.ValidateFilename(" "+goodFile.Name()+"\t\n"))
})
t.Run("reports errors for filenames that are the empty string", func(t *testing.T) {
assert.Error(t, kubeconfig.ValidateFilename(""))
})
t.Run("reports errors for filenames that are only whitespace (as if it were the empty string)", func(t *testing.T) {
assert.Error(t, kubeconfig.ValidateFilename(" \t \n"))
})
}
func TestResolveFilename(t *testing.T) {
t.Run("returns the flag value when provided", func(t *testing.T) {
expected := "flag-provided-kubeconfig"
filename := kubeconfig.ResolveFilename(expected, "cluster-name")
assert.Equal(t, expected, filename)
})
t.Run("returns the cluster-name based filename when provided, and the flag value is empty", func(t *testing.T) {
clusterName := "cluster-name"
expected := kubeconfig.FromClusterName(clusterName)
assert.Equal(t, expected, kubeconfig.ResolveFilename("", clusterName))
})
t.Run("returns the environment value if neither the flag or cluster-name values are provided", func(t *testing.T) {
expected := "some-value"
t.Setenv("KUBECONFIG", expected)
assert.Equal(t, expected, kubeconfig.ResolveFilename("", ""))
})
}
func TestResolveAndValidateFilename(t *testing.T) {
t.Run("returns flag value when valid", func(t *testing.T) {
goodFile := test.WithFakeFileContents(t, bytes.NewReader(goodKubeconfig))
filename, err := kubeconfig.ResolveAndValidateFilename(goodFile.Name(), "")
if assert.NoError(t, err) {
assert.Equal(t, goodFile.Name(), filename)
}
})
t.Run("returns error when invalid", func(t *testing.T) {
goodFile := test.WithFakeFileContents(t, bytes.NewBufferString("lakjdf"))
_, err := kubeconfig.ResolveAndValidateFilename(goodFile.Name(), "")
assert.Error(t, err)
})
t.Run("returns an error if no kubeconfig is found", func(t *testing.T) {
t.Setenv("KUBECONFIG", "")
_, err := kubeconfig.ResolveAndValidateFilename("", "")
assert.Error(t, err)
})
t.Run("golden path", func(t *testing.T) {
t.Setenv("KUBECONFIG", "")
_, err := kubeconfig.ResolveAndValidateFilename("", "")
assert.Error(t, err)
})
}
| 188 |
eks-anywhere | aws | Go | /*
Package logger implements a simple way to init a global logger and access it
through a logr.Logger interface.
Message:
All messages should start with a capital letter.
Log level:
The loggers only support verbosity levels (V-levels) instead of semantic levels.
Level zero, the default, is important for the end user.
- 0: You always want to see this.
- 1: Common logging that you don't want to show by default.
- 2: Useful steady state information about the operation and important log messages that may correlate to significant changes in the system.
- 3: Extended information about changes. Somewhat useful information for the user that is not important enough for level 2.
- 4: Debugging information. Starting from this level, all logs are oriented to developers and troubleshooting.
- 5: Traces. Information to follow the code path.
- 6: Information about interaction with external resources. External binary commands, api calls.
- 7: Extra information passed to external systems. Configuration files, kubernetes manifests, etc.
- 8: Truncated external binaries and clients output/responses.
- 9: Full external binaries and clients output/responses.
Logging WithValues:
Logging WithValues should be preferred to embedding values into log messages because it allows
machine readability.
Variable names should start with a capital letter.
Logging WithNames:
Logging WithNames should be used carefully.
Prefixing logs with an indication of which part of the code generates the entry might be useful
for developers, but it can confuse end users because it increases verbosity without providing
information the user can understand or benefit from.
Logging errors:
Proper error management should always be preferred to the use of log.Error.
*/
package logger
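// Illustrative sketch (not part of the original doc.go) of how the conventions above
// translate into calls, using the package-level Info and V helpers defined elsewhere
// in this package; clusterName, url, and cmd are hypothetical variables:
//
//	logger.Info("Creating new workload cluster", "ClusterName", clusterName) // level 0, structured values
//	logger.V(4).Info("Reading bundles manifest", "url", url)                 // debugging detail
//	logger.V(6).Info("Executing command", "Command", cmd)                    // external interactions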
| 43 |
eks-anywhere | aws | Go | package logger
import (
"fmt"
"os"
"time"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Init initializes the package logger. Repeat calls will overwrite the package logger which may
// result in unexpected behavior.
func Init(opts Options) error {
encoderCfg := zap.NewDevelopmentEncoderConfig()
encoderCfg.EncodeLevel = nil
encoderCfg.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {}
// Levels 4 and above are used for debugging, and we want a different log structure for
// debug logs.
if opts.Level >= 4 {
encoderCfg.EncodeLevel = func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
// Because we use negated levels, it is necessary to negate the level again so the
// output appears as a positive V<level> string (for example, V5).
//
// See logrAtomicLevel().
enc.AppendString(fmt.Sprintf("V%d", -int(l)))
}
encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder
}
// zapcore.Open creates a noop logger if no paths are passed. Using a slice ensures we expand
// the slice to nothing when opts.OutputFilePath is unset.
var logPath []string
if opts.OutputFilePath != "" {
logPath = append(logPath, opts.OutputFilePath)
}
logFile, _, err := zap.Open(logPath...)
if err != nil {
return err
}
// Build the encoders and logger.
fileEncoder := zapcore.NewJSONEncoder(encoderCfg)
consoleEncoder := zapcore.NewConsoleEncoder(encoderCfg)
core := zapcore.NewTee(
zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), logrAtomicLevel(opts.Level)),
zapcore.NewCore(fileEncoder, logFile, logrAtomicLevel(MaxLogLevel)),
)
logger := zap.New(core)
// Configure package state so the logger can be used by other packages.
setLogger(zapr.NewLogger(logger))
setOutputFilePath(opts.OutputFilePath)
return nil
}
// Options represents a set of arguments for initializing the zap logger.
type Options struct {
// Level is the log level at which to configure the logger from 0 to 9.
Level int
// OutputFilePath is an absolute file path. The file will be created if it doesn't exist.
// All logs available at level 9 will be written to the file.
OutputFilePath string
}
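// Illustrative sketch (not part of the original file): initializing the package
// logger for a CLI run at verbosity 5, with a file sink that captures logs at full
// verbosity. The log file path shown is hypothetical.
//
//	err := logger.Init(logger.Options{
//		Level:          5,
//		OutputFilePath: "/tmp/eksa-cli.log",
//	})
//	if err != nil {
//		return fmt.Errorf("initializing logger: %v", err)
//	}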
// logrAtomicLevel creates a zapcore.AtomicLevel compatible with go-logr.
func logrAtomicLevel(level int) zap.AtomicLevel {
// The go-logr wrapper uses custom Zap log levels. To represent this in Zap, it's
// necessary to negate the level to circumvent Zap level constraints.
//
// See https://github.com/go-logr/zapr/blob/master/zapr.go#L50.
return zap.NewAtomicLevelAt(zapcore.Level(-level))
}
| 82 |
eks-anywhere | aws | Go | package logger_test
import (
"bytes"
"io"
"os"
"path/filepath"
"testing"
"github.com/aws/eks-anywhere/pkg/logger"
)
func TestInit(t *testing.T) {
tdir := t.TempDir()
defer os.RemoveAll(tdir)
logFile := filepath.Join(tdir, "test.log")
err := logger.Init(logger.Options{
OutputFilePath: logFile,
})
if err != nil {
t.Fatal(err)
}
message := "log me"
logger.Info(message)
// Opening the file validates it exists.
f, err := os.Open(logFile)
if err != nil {
t.Fatalf("Error opening log file: %v", err)
}
defer f.Close()
// Ensure we're writing data to the logger from package functions.
buf, err := io.ReadAll(f)
if err != nil {
t.Fatalf("Reading log file: %v", err)
}
if !bytes.Contains(buf, []byte(message)) {
t.Fatalf("Log file does not contain expected message: %s", message)
}
}
| 46 |
eks-anywhere | aws | Go | package logger
import "sync"
// This source file uses package state to configure output file paths for the bundle to retrieve.
// The relationship between the two components is undesirable and will be refactored to pass the
// path to bundle generation code explicitly.
//
// Please avoid using the GetOutputFilePath() function in new code.
var (
outputFilePath string
outputFilePathMtx sync.Mutex
)
func setOutputFilePath(path string) {
outputFilePathMtx.Lock()
defer outputFilePathMtx.Unlock()
outputFilePath = path
}
// GetOutputFilePath returns the path to the file where high verbosity logs are written to.
// If the logger hasn't been configured to output to a file, it returns an empty string.
//
// Deprecated: The function will be removed to avoid using package state.
func GetOutputFilePath() string {
outputFilePathMtx.Lock()
defer outputFilePathMtx.Unlock()
return outputFilePath
}
| 31 |
eks-anywhere | aws | Go | package manifests
import (
"context"
"fmt"
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/manifests/releases"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type FileReader interface {
ReadFile(url string) ([]byte, error)
}
type Reader struct {
FileReader
releasesManifestURL string
}
type ReaderOpt func(*Reader)
func WithReleasesManifest(manifestURL string) ReaderOpt {
return func(r *Reader) {
r.releasesManifestURL = manifestURL
}
}
func NewReader(filereader FileReader, opts ...ReaderOpt) *Reader {
r := &Reader{
FileReader: filereader,
releasesManifestURL: releases.ManifestURL(),
}
for _, opt := range opts {
opt(r)
}
return r
}
func (r *Reader) ReadBundlesForVersion(version string) (*releasev1.Bundles, error) {
rls, err := releases.ReadReleasesFromURL(r, r.releasesManifestURL)
if err != nil {
return nil, err
}
release, err := releases.ReleaseForVersion(rls, version)
if err != nil {
return nil, err
}
if release == nil {
return nil, fmt.Errorf("invalid version %s, no matching release found", version)
}
return releases.ReadBundlesForRelease(r, release)
}
func (r *Reader) ReadEKSD(eksaVersion, kubeVersion string) (*eksdv1.Release, error) {
b, err := r.ReadBundlesForVersion(eksaVersion)
if err != nil {
return nil, err
}
versionsBundle := bundles.VersionsBundleForKubernetesVersion(b, kubeVersion)
if versionsBundle == nil {
return nil, fmt.Errorf("kubernetes version %s is not supported by bundles manifest %d", kubeVersion, b.Spec.Number)
}
return bundles.ReadEKSD(r, *versionsBundle)
}
func (r *Reader) ReadImages(eksaVersion string) ([]releasev1.Image, error) {
bundle, err := r.ReadBundlesForVersion(eksaVersion)
if err != nil {
return nil, err
}
return bundles.ReadImages(r, bundle)
}
func (r *Reader) ReadImagesFromBundles(_ context.Context, b *releasev1.Bundles) ([]releasev1.Image, error) {
return bundles.ReadImages(r, b)
}
func (r *Reader) ReadCharts(eksaVersion string) ([]releasev1.Image, error) {
bundle, err := r.ReadBundlesForVersion(eksaVersion)
if err != nil {
return nil, err
}
return bundles.Charts(bundle), nil
}
func (r *Reader) ReadChartsFromBundles(ctx context.Context, b *releasev1.Bundles) []releasev1.Image {
return bundles.Charts(b)
}
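// Illustrative usage sketch (not part of the original file): reading the Bundles for
// an EKS-A version and listing its container images. fileReader (a FileReader
// implementation), ctx, and the version string are hypothetical:
//
//	r := manifests.NewReader(fileReader)
//	bundles, err := r.ReadBundlesForVersion("v0.12.0")
//	if err != nil {
//		return err
//	}
//	images, err := r.ReadImagesFromBundles(ctx, bundles)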
| 99 |
eks-anywhere | aws | Go | package manifests_test
import (
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/manifests/releases"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestReaderReadBundlesForVersion(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
releasesURL := releases.ManifestURL()
releasesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1
spec:
releases:
- bundleManifestUrl: "https://bundles/bundles.yaml"
version: v0.0.1`
reader.EXPECT().ReadFile(releasesURL).Return([]byte(releasesManifest), nil)
bundlesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: bundles-1`
wantBundles := &releasev1.Bundles{
TypeMeta: metav1.TypeMeta{
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
Kind: "Bundles",
},
ObjectMeta: metav1.ObjectMeta{
Name: "bundles-1",
},
}
reader.EXPECT().ReadFile("https://bundles/bundles.yaml").Return([]byte(bundlesManifest), nil)
r := manifests.NewReader(reader)
g.Expect(r.ReadBundlesForVersion("v0.0.1")).To(Equal(wantBundles))
}
func TestReaderReadBundlesForVersionErrorVersionNotSupported(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
releasesURL := releases.ManifestURL()
releasesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1
spec:
releases:
- bundleManifestUrl: "https://bundles/bundles.yaml"
version: v0.0.1`
reader.EXPECT().ReadFile(releasesURL).Return([]byte(releasesManifest), nil)
r := manifests.NewReader(reader)
_, err := r.ReadBundlesForVersion("v0.0.2")
g.Expect(err).To(MatchError(ContainSubstring("invalid version v0.0.2, no matching release found")))
}
func TestReaderReadEKSD(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
releasesURL := releases.ManifestURL()
releasesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1
spec:
releases:
- bundleManifestUrl: "https://bundles/bundles.yaml"
version: v0.0.1`
reader.EXPECT().ReadFile(releasesURL).Return([]byte(releasesManifest), nil)
bundlesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: bundles-1
spec:
versionsBundles:
- kubeVersion: "1.21"
eksD:
channel: 1-21
kubeVersion: v1.21.5
manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-7.yaml`
reader.EXPECT().ReadFile("https://bundles/bundles.yaml").Return([]byte(bundlesManifest), nil)
reader.EXPECT().ReadFile("https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-7.yaml").Return([]byte(bundlesManifest), nil)
r := manifests.NewReader(reader)
_, err := r.ReadEKSD("v0.0.1", "1.21")
g.Expect(err).ToNot(HaveOccurred())
}
func TestReaderReadEKSDUnsupportedKubeVersion(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
releasesURL := releases.ManifestURL()
releasesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1
spec:
releases:
- bundleManifestUrl: "https://bundles/bundles.yaml"
version: v0.0.1`
reader.EXPECT().ReadFile(releasesURL).Return([]byte(releasesManifest), nil)
bundlesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: bundles-1
spec:
versionsBundles:
- kubeVersion: "1.21"
eksD:
channel: 1-21
kubeVersion: v1.21.5
manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-7.yaml`
reader.EXPECT().ReadFile("https://bundles/bundles.yaml").Return([]byte(bundlesManifest), nil)
r := manifests.NewReader(reader)
_, err := r.ReadEKSD("v0.0.1", "1.22")
g.Expect(err).To(MatchError(ContainSubstring("kubernetes version 1.22 is not supported by bundles manifest 0")))
}
func TestReaderReadImages(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
releasesURL := releases.ManifestURL()
releasesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1
spec:
releases:
- bundleManifestUrl: "https://bundles/bundles.yaml"
version: v0.0.1`
reader.EXPECT().ReadFile(releasesURL).Return([]byte(releasesManifest), nil)
bundlesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: bundles-1
spec:
versionsBundles:
- kubeVersion: "1.21"
eksD:
channel: 1-21
kubeVersion: v1.21.5
manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-7.yaml`
reader.EXPECT().ReadFile("https://bundles/bundles.yaml").Return([]byte(bundlesManifest), nil)
reader.EXPECT().ReadFile("https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-7.yaml").Return([]byte(bundlesManifest), nil)
r := manifests.NewReader(reader)
_, err := r.ReadImages("v0.0.1")
g.Expect(err).ToNot(HaveOccurred())
}
func TestReaderReadCharts(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
releasesURL := releases.ManifestURL()
releasesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1
spec:
releases:
- bundleManifestUrl: "https://bundles/bundles.yaml"
version: v0.0.1`
reader.EXPECT().ReadFile(releasesURL).Return([]byte(releasesManifest), nil)
bundlesManifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: bundles-1`
reader.EXPECT().ReadFile("https://bundles/bundles.yaml").Return([]byte(bundlesManifest), nil)
r := manifests.NewReader(reader)
charts, err := r.ReadCharts("v0.0.1")
g.Expect(err).ToNot(HaveOccurred())
g.Expect(charts).To(BeEmpty())
}
| 215 |
eks-anywhere | aws | Go | package bundles
import releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
func Charts(bundles *releasev1.Bundles) []releasev1.Image {
var charts []releasev1.Image
for _, v := range bundles.Spec.VersionsBundles {
versionsBundleCharts := v.Charts()
for _, c := range versionsBundleCharts {
charts = append(charts, *c)
}
}
return charts
}
| 16 |
eks-anywhere | aws | Go | package bundles_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestCharts(t *testing.T) {
g := NewWithT(t)
b := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{},
{},
},
},
}
g.Expect(bundles.Charts(b)).NotTo(BeEmpty())
}
| 25 |
eks-anywhere | aws | Go | package bundles
import (
"fmt"
"github.com/aws/eks-anywhere/pkg/manifests/eksd"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func ReadImages(reader Reader, bundles *releasev1.Bundles) ([]releasev1.Image, error) {
var images []releasev1.Image
for _, v := range bundles.Spec.VersionsBundles {
images = append(images, v.Images()...)
eksdRelease, err := ReadEKSD(reader, v)
if err != nil {
return nil, fmt.Errorf("reading images from Bundle: %v", err)
}
for _, i := range eksd.Images(eksdRelease) {
images = append(images, releasev1.Image{
Name: i.Name,
Description: i.Description,
ImageDigest: i.Image.ImageDigest,
URI: i.Image.URI,
OS: i.OS,
Arch: i.Arch,
})
}
}
return images, nil
}
| 34 |
eks-anywhere | aws | Go | package bundles_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
var eksdManifest = `apiVersion: distro.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
creationTimestamp: null
name: kubernetes-1-20-eks-1
spec:
channel: 1-20
number: 1
status:
components:
- assets:
- arch:
- amd64
- arm64
description: node-driver-registrar container image
image:
uri: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-20-1
name: node-driver-registrar-image
os: linux
type: Image
gitTag: v2.1.0
name: node-driver-registrar
- assets:
- arch:
- amd64
- arm64
description: csi-snapshotter container image
image:
uri: public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter:v3.0.3-eks-1-20-1
name: csi-snapshotter-image
os: linux
type: Image`
func TestReadImages(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
eksdURL := "eksdurl"
b := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
EksD: releasev1.EksDRelease{
EksDReleaseUrl: eksdURL,
},
},
},
},
}
driverImage := releasev1.Image{
Name: "node-driver-registrar-image",
Description: "node-driver-registrar container image",
OS: "linux",
OSName: "",
Arch: []string{"amd64", "arm64"},
URI: "public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-20-1",
ImageDigest: "",
}
csiImage := releasev1.Image{
Name: "csi-snapshotter-image",
Description: "csi-snapshotter container image",
OS: "linux",
OSName: "",
Arch: []string{"amd64", "arm64"},
URI: "public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter:v3.0.3-eks-1-20-1",
ImageDigest: "",
}
reader.EXPECT().ReadFile(eksdURL).Return([]byte(eksdManifest), nil)
gotImages, err := bundles.ReadImages(reader, b)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(gotImages)).To(BeNumerically(">", 2), "it should return more than the two images from eksd")
g.Expect(gotImages).To(ContainElement(driverImage), "it should return the node-driver-registrar image in the eksd manifest")
g.Expect(gotImages).To(ContainElement(csiImage), "it should return the csi image in the eksd manifest")
}
func TestReadImagesError(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
eksdURL := "eksdurl"
b := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
EksD: releasev1.EksDRelease{
EksDReleaseUrl: eksdURL,
},
},
},
},
}
reader.EXPECT().ReadFile(eksdURL).Return(nil, errors.New("error reading eksd"))
_, err := bundles.ReadImages(reader, b)
g.Expect(err).To(MatchError(ContainSubstring("reading images from Bundle: error reading eksd")))
}
| 117 |
eks-anywhere | aws | Go | package bundles
import (
"path/filepath"
"github.com/pkg/errors"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// Manifest holds the data of a manifest referenced in the Bundles.
type Manifest struct {
Filename string
Content []byte
}
// ReadManifest reads the content of a [releasev1.Manifest].
func ReadManifest(reader Reader, manifest releasev1.Manifest) (*Manifest, error) {
content, err := reader.ReadFile(manifest.URI)
if err != nil {
return nil, errors.Errorf("reading manifest %s: %v", manifest.URI, err)
}
return &Manifest{
Filename: filepath.Base(manifest.URI),
Content: content,
}, nil
}
| 29 |
eks-anywhere | aws | Go | package bundles
import (
"fmt"
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests/eksd"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type Reader interface {
ReadFile(url string) ([]byte, error)
}
func Read(reader Reader, url string) (*releasev1.Bundles, error) {
logger.V(4).Info("Reading bundles manifest", "url", url)
content, err := reader.ReadFile(url)
if err != nil {
return nil, err
}
bundles := &releasev1.Bundles{}
if err = yaml.Unmarshal(content, bundles); err != nil {
return nil, fmt.Errorf("failed to unmarshal bundles manifest from [%s]: %v", url, err)
}
return bundles, nil
}
func ReadEKSD(reader Reader, versionsBundle releasev1.VersionsBundle) (*eksdv1.Release, error) {
return eksd.ReadManifest(reader, versionsBundle.EksD.EksDReleaseUrl)
}
| 36 |
eks-anywhere | aws | Go | package bundles_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestRead(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
manifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: bundles-1`
wantBundles := &releasev1.Bundles{
TypeMeta: metav1.TypeMeta{
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
Kind: "Bundles",
},
ObjectMeta: metav1.ObjectMeta{
Name: "bundles-1",
},
}
reader.EXPECT().ReadFile(url).Return([]byte(manifest), nil)
g.Expect(bundles.Read(reader, url)).To(Equal(wantBundles))
}
func TestReadErrorReading(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
reader.EXPECT().ReadFile(url).Return(nil, errors.New("error reading"))
_, err := bundles.Read(reader, url)
g.Expect(err).To(MatchError(ContainSubstring("error reading")))
}
func TestReadErrorUnmarshaling(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
manifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Bundles
metadata:
name: {}`
reader.EXPECT().ReadFile(url).Return([]byte(manifest), nil)
_, err := bundles.Read(reader, url)
g.Expect(err).To(MatchError(ContainSubstring("failed to unmarshal bundles manifest from [url]:")))
}
| 70 |
eks-anywhere | aws | Go | package bundles
import (
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
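// VersionsBundleForKubernetesVersion returns the VersionsBundle whose KubeVersion
// matches kubeVersion, or nil if the Bundles manifest doesn't support that
// Kubernetes version.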
func VersionsBundleForKubernetesVersion(bundles *releasev1.Bundles, kubeVersion string) *releasev1.VersionsBundle {
for _, versionsBundle := range bundles.Spec.VersionsBundles {
if versionsBundle.KubeVersion == kubeVersion {
return &versionsBundle
}
}
return nil
}
| 15 |
eks-anywhere | aws | Go | package bundles_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestVersionsBundleForKubernetesVersion(t *testing.T) {
versionsBundle121 := releasev1.VersionsBundle{KubeVersion: "1.21"}
versionsBundle122 := releasev1.VersionsBundle{KubeVersion: "1.22"}
b := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
versionsBundle121,
versionsBundle122,
},
},
}
tests := []struct {
name string
kubeVersion string
want *releasev1.VersionsBundle
}{
{
name: "supported version",
kubeVersion: "1.21",
want: &versionsBundle121,
},
{
name: "unsupported version",
kubeVersion: "1.10",
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(bundles.VersionsBundleForKubernetesVersion(b, tt.kubeVersion)).To(Equal(tt.want))
})
}
}
| 46 |
eks-anywhere | aws | Go | package eksd
import (
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
)
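// Images returns all assets in the eks-distro release that reference a container
// image, skipping archives and other non-image assets.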
func Images(release *eksdv1.Release) []eksdv1.Asset {
images := []eksdv1.Asset{}
for _, component := range release.Status.Components {
for _, asset := range component.Assets {
if asset.Image != nil {
images = append(images, asset)
}
}
}
return images
}
| 18 |
eks-anywhere | aws | Go | package eksd_test
import (
"testing"
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/manifests/eksd"
)
func TestImages(t *testing.T) {
g := NewWithT(t)
image1 := eksdv1.Asset{Name: "image1", Image: &eksdv1.AssetImage{}}
image2 := eksdv1.Asset{Name: "image2", Image: &eksdv1.AssetImage{}}
image3 := eksdv1.Asset{Name: "image3", Image: &eksdv1.AssetImage{}}
wantImages := []eksdv1.Asset{image1, image2, image3}
r := &eksdv1.Release{
Status: eksdv1.ReleaseStatus{
Components: []eksdv1.Component{
{
Assets: []eksdv1.Asset{image1, image2},
},
{
Assets: []eksdv1.Asset{
image3,
{Name: "artifact", Archive: &eksdv1.AssetArchive{}},
},
},
},
},
}
g.Expect(eksd.Images(r)).To(Equal(wantImages))
}
| 38 |
eks-anywhere | aws | Go | package eksd
import (
"fmt"
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
"sigs.k8s.io/yaml"
)
type Reader interface {
ReadFile(url string) ([]byte, error)
}
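// ReadManifest reads and unmarshals an eks-distro Release manifest from url.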
func ReadManifest(reader Reader, url string) (*eksdv1.Release, error) {
content, err := reader.ReadFile(url)
if err != nil {
return nil, err
}
eksd := &eksdv1.Release{}
if err = yaml.Unmarshal(content, eksd); err != nil {
return nil, fmt.Errorf("failed to unmarshal eksd manifest: %v", err)
}
return eksd, nil
}
| 27 |
eks-anywhere | aws | Go | package eksd_test
import (
"errors"
"testing"
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/manifests/eksd"
)
func TestReadManifest(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
manifest := `apiVersion: distro.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: kubernetes-1-19-eks-4`
wantRelease := &eksdv1.Release{
TypeMeta: metav1.TypeMeta{
APIVersion: "distro.eks.amazonaws.com/v1alpha1",
Kind: "Release",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubernetes-1-19-eks-4",
},
}
reader.EXPECT().ReadFile(url).Return([]byte(manifest), nil)
g.Expect(eksd.ReadManifest(reader, url)).To(Equal(wantRelease))
}
func TestReadManifestErrorReading(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
reader.EXPECT().ReadFile(url).Return(nil, errors.New("error reading"))
_, err := eksd.ReadManifest(reader, url)
g.Expect(err).To(MatchError(ContainSubstring("error reading")))
}
func TestReadManifestErrorUnmarshaling(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
manifest := `apiVersion: distro.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: {}`
reader.EXPECT().ReadFile(url).Return([]byte(manifest), nil)
_, err := eksd.ReadManifest(reader, url)
g.Expect(err).To(MatchError(ContainSubstring("failed to unmarshal eksd manifest: error unmarshaling JSON:")))
}
| 70 |
eks-anywhere | aws | Go | package releases
import (
"fmt"
"github.com/pkg/errors"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/semver"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// manifestURL holds the URL to the EKS-A releases manifest.
// It is injected at build time; this is just a sane default for development.
var manifestURL = "https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/eks-a-release.yaml"
func ManifestURL() string {
return manifestURL
}
type Reader interface {
ReadFile(url string) ([]byte, error)
}
func ReadReleases(reader Reader) (*releasev1.Release, error) {
return ReadReleasesFromURL(reader, ManifestURL())
}
func ReadReleasesFromURL(reader Reader, url string) (*releasev1.Release, error) {
content, err := reader.ReadFile(url)
if err != nil {
return nil, errors.Wrapf(err, "reading Releases file")
}
release := &releasev1.Release{}
if err = yaml.Unmarshal(content, release); err != nil {
return nil, fmt.Errorf("failed to unmarshal release manifest from [%s]: %v", url, err)
}
return release, nil
}
func ReadBundlesForRelease(reader Reader, release *releasev1.EksARelease) (*releasev1.Bundles, error) {
return bundles.Read(reader, release.BundleManifestUrl)
}
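// ReleaseForVersion returns the first EksARelease whose version either exactly matches
// version or has the same core version and prerelease (ignoring build metadata). It
// returns an error for invalid semver input and nil when no release matches.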
func ReleaseForVersion(releases *releasev1.Release, version string) (*releasev1.EksARelease, error) {
semVer, err := semver.New(version)
if err != nil {
return nil, fmt.Errorf("invalid eksa version: %v", err)
}
for _, r := range releases.Spec.Releases {
if r.Version == version {
return &r, nil
}
releaseVersion, err := semver.New(r.Version)
if err != nil {
return nil, fmt.Errorf("invalid version for release %d: %v", r.Number, err)
}
if semVer.SamePrerelease(releaseVersion) {
return &r, nil
}
}
return nil, nil
}
| 71 |
eks-anywhere | aws | Go | package releases_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/manifests/releases"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestReadReleasesFromURL(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
manifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: release-1`
wantRelease := &releasev1.Release{
TypeMeta: metav1.TypeMeta{
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
Kind: "Release",
},
ObjectMeta: metav1.ObjectMeta{
Name: "release-1",
},
}
reader.EXPECT().ReadFile(url).Return([]byte(manifest), nil)
g.Expect(releases.ReadReleasesFromURL(reader, url)).To(Equal(wantRelease))
}
func TestReadReleasesFromURLErrorReading(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
reader.EXPECT().ReadFile(url).Return(nil, errors.New("error reading"))
_, err := releases.ReadReleasesFromURL(reader, url)
g.Expect(err).To(MatchError(ContainSubstring("error reading")))
}
func TestReadReleasesFromURLErrorUnmarshaling(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
url := "url"
manifest := `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Release
metadata:
name: {}`
reader.EXPECT().ReadFile(url).Return([]byte(manifest), nil)
_, err := releases.ReadReleasesFromURL(reader, url)
g.Expect(err).To(MatchError(ContainSubstring("failed to unmarshal release manifest from [url]:")))
}
func TestReleaseForVersionSuccess(t *testing.T) {
tests := []struct {
name string
releases *releasev1.Release
version string
want *releasev1.EksARelease
}{
{
name: "multiple releases same patch, different prerelease",
releases: &releasev1.Release{
Spec: releasev1.ReleaseSpec{
Releases: []releasev1.EksARelease{
{Version: "v0.0.1", Number: 1},
{Version: "v0.0.1-dev", Number: 2},
{Version: "v0.0.1-alpha", Number: 3},
{Version: "v0.0.1-beta", Number: 4},
},
},
},
version: "v0.0.1-alpha",
want: &releasev1.EksARelease{Version: "v0.0.1-alpha", Number: 3},
},
{
name: "multiple releases same patch, same prerelease, different build metadata",
releases: &releasev1.Release{
Spec: releasev1.ReleaseSpec{
Releases: []releasev1.EksARelease{
{Version: "v0.0.1-alpha+werwe", Number: 1},
{Version: "v0.0.1-alpha+f4fe", Number: 2},
{Version: "v0.0.1-alpha+f43fs", Number: 3},
{Version: "v0.0.1-alpha+f234f", Number: 4},
},
},
},
version: "v0.0.1-alpha",
want: &releasev1.EksARelease{Version: "v0.0.1-alpha+werwe", Number: 1},
},
{
name: "version doesn't exist",
releases: &releasev1.Release{
Spec: releasev1.ReleaseSpec{
Releases: []releasev1.EksARelease{
{Version: "v0.0.1-alpha+werwe", Number: 1},
{Version: "v0.0.1-alpha+f4fe", Number: 2},
{Version: "v0.0.1-alpha+f43fs", Number: 3},
{Version: "v0.0.1-alpha+f234f", Number: 4},
},
},
},
version: "v0.0.2-alpha",
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(releases.ReleaseForVersion(tt.releases, tt.version)).To(Equal(tt.want))
})
}
}
func TestReleaseForVersionError(t *testing.T) {
tests := []struct {
name string
releases *releasev1.Release
version string
want string
}{
{
name: "invalid version",
version: "x.x.x",
want: "invalid eksa version",
},
{
name: "invalid version in releases",
releases: &releasev1.Release{
Spec: releasev1.ReleaseSpec{
Releases: []releasev1.EksARelease{
{Version: "v0.0.1", Number: 1},
{Version: "vx.x.x", Number: 2},
},
},
},
version: "1.1.1",
want: "invalid version for release 2",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
_, err := releases.ReleaseForVersion(tt.releases, tt.version)
g.Expect(err).To(MatchError(ContainSubstring(tt.want)))
})
}
}
| 166 |
eks-anywhere | aws | Go | package cilium
import (
"github.com/aws/eks-anywhere/pkg/constants"
)
const namespace = constants.KubeSystemNamespace
// InstallUpgradeTemplater is the composition of InstallTemplater and UpgradeTemplater.
type InstallUpgradeTemplater interface {
InstallTemplater
UpgradeTemplater
}
// Cilium installs and upgrades the Cilium CNI in clusters.
type Cilium struct {
*Upgrader
*Installer
}
// NewCilium constructs a new Cilium.
func NewCilium(client KubernetesClient, templater InstallUpgradeTemplater) *Cilium {
return &Cilium{
Installer: NewInstaller(client, templater),
Upgrader: NewUpgrader(client, templater),
}
}
| 28 |
eks-anywhere | aws | Go | package cilium_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
v1alpha12 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/networking/cilium/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type ciliumtest struct {
*WithT
ctx context.Context
client *mocks.MockKubernetesClient
h *mocks.MockHelm
installTemplater *mocks.MockInstallTemplater
cluster *types.Cluster
spec *cluster.Spec
ciliumValues []byte
}
func newCiliumTest(t *testing.T) *ciliumtest {
ctrl := gomock.NewController(t)
h := mocks.NewMockHelm(ctrl)
client := mocks.NewMockKubernetesClient(ctrl)
installTemplater := mocks.NewMockInstallTemplater(ctrl)
return &ciliumtest{
WithT: NewWithT(t),
ctx: context.Background(),
client: client,
h: h,
installTemplater: installTemplater,
cluster: &types.Cluster{
Name: "w-cluster",
KubeconfigFile: "config.kubeconfig",
},
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium = v1alpha1.CiliumBundle{
Cilium: v1alpha1.Image{
URI: "public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2",
},
Operator: v1alpha1.Image{
URI: "public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2",
},
HelmChart: v1alpha1.Image{
Name: "cilium-chart",
URI: "public.ecr.aws/isovalent/cilium:1.9.13-eksa.2",
},
}
s.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1.21.9-eks-1-21-10"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha12.CNIConfig{Cilium: &v1alpha12.CiliumConfig{}}
}),
ciliumValues: []byte("manifest"),
}
}
| 63 |
eks-anywhere | aws | Go | package cilium
import (
"context"
"time"
v1 "k8s.io/api/apps/v1"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
// DaemonSetName is the default name for the Cilium DS installed in EKS-A clusters.
DaemonSetName = "cilium"
// PreflightDaemonSetName is the default name for the Cilium preflight DS installed
// in EKS-A clusters during Cilium upgrades.
PreflightDaemonSetName = "cilium-pre-flight-check"
// DeploymentName is the default name for the Cilium operator Deployment installed in EKS-A clusters.
DeploymentName = "cilium-operator"
// PreflightDeploymentName is the default name for the Cilium preflight Deployment installed
// in EKS-A clusters during Cilium upgrades.
PreflightDeploymentName = "cilium-pre-flight-check"
// ConfigMapName is the default name for the Cilium ConfigMap
// containing Cilium's configuration.
ConfigMapName = "cilium-config"
)
// Client allows interacting with the Kubernetes API.
type Client interface {
ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
DeleteKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
GetDaemonSet(ctx context.Context, name, namespace, kubeconfig string) (*v1.DaemonSet, error)
GetDeployment(ctx context.Context, name, namespace, kubeconfig string) (*v1.Deployment, error)
RolloutRestartDaemonSet(ctx context.Context, name, namespace, kubeconfig string) error
}
// RetrierClient wraps basic kubernetes API operations around a retrier.
type RetrierClient struct {
client Client
retrier *retrier.Retrier
}
// RetrierClientOpt allows customizing a RetrierClient
// on construction.
type RetrierClientOpt func(*RetrierClient)
// RetrierClientRetrier allows using a custom retrier.
func RetrierClientRetrier(retrier *retrier.Retrier) RetrierClientOpt {
return func(u *RetrierClient) {
u.retrier = retrier
}
}
// NewRetrier constructs a new RetrierClient.
func NewRetrier(client Client, opts ...RetrierClientOpt) *RetrierClient {
c := &RetrierClient{
client: client,
retrier: retrier.New(5 * time.Minute),
}
for _, opt := range opts {
opt(c)
}
return c
}
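// Illustrative sketch (not part of the original file): constructing a RetrierClient
// with a custom retrier instead of the default 5-minute retrier, similar to what the
// unit tests do; k8sClient, ctx, cluster, and manifest are hypothetical:
//
//	c := NewRetrier(k8sClient, RetrierClientRetrier(retrier.NewWithMaxRetries(10, time.Second)))
//	if err := c.Apply(ctx, cluster, manifest); err != nil {
//		return err
//	}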
// Apply creates/updates the objects provided by the yaml document in the cluster.
func (c *RetrierClient) Apply(ctx context.Context, cluster *types.Cluster, data []byte) error {
return c.retrier.Retry(
func() error {
return c.client.ApplyKubeSpecFromBytes(ctx, cluster, data)
},
)
}
// Delete deletes the objects defined in the yaml document from the cluster.
func (c *RetrierClient) Delete(ctx context.Context, cluster *types.Cluster, data []byte) error {
return c.retrier.Retry(
func() error {
return c.client.DeleteKubeSpecFromBytes(ctx, cluster, data)
},
)
}
// WaitForPreflightDaemonSet blocks until the Cilium preflight DS installed during upgrades
// becomes ready or until the timeout expires.
func (c *RetrierClient) WaitForPreflightDaemonSet(ctx context.Context, cluster *types.Cluster) error {
return c.retrier.Retry(
func() error {
return c.checkPreflightDaemonSetReady(ctx, cluster)
},
)
}
func (c *RetrierClient) checkPreflightDaemonSetReady(ctx context.Context, cluster *types.Cluster) error {
ciliumDaemonSet, err := c.client.GetDaemonSet(ctx, DaemonSetName, namespace, cluster.KubeconfigFile)
if err != nil {
return err
}
if err := CheckDaemonSetReady(ciliumDaemonSet); err != nil {
return err
}
preflightDaemonSet, err := c.client.GetDaemonSet(ctx, PreflightDaemonSetName, namespace, cluster.KubeconfigFile)
if err != nil {
return err
}
if err := CheckPreflightDaemonSetReady(ciliumDaemonSet, preflightDaemonSet); err != nil {
return err
}
return nil
}
// WaitForPreflightDeployment blocks until the Cilium preflight Deployment installed during upgrades
// becomes ready or until the timeout expires.
func (c *RetrierClient) WaitForPreflightDeployment(ctx context.Context, cluster *types.Cluster) error {
return c.retrier.Retry(
func() error {
return c.checkPreflightDeploymentReady(ctx, cluster)
},
)
}
func (c *RetrierClient) checkPreflightDeploymentReady(ctx context.Context, cluster *types.Cluster) error {
preflightDeployment, err := c.client.GetDeployment(ctx, PreflightDeploymentName, namespace, cluster.KubeconfigFile)
if err != nil {
return err
}
if err := CheckDeploymentReady(preflightDeployment); err != nil {
return err
}
return nil
}
// WaitForCiliumDaemonSet blocks until the Cilium DS installed as part of the default
// Cilium installation becomes ready or until the timeout expires.
func (c *RetrierClient) WaitForCiliumDaemonSet(ctx context.Context, cluster *types.Cluster) error {
return c.retrier.Retry(
func() error {
return c.checkCiliumDaemonSetReady(ctx, cluster)
},
)
}
// RolloutRestartCiliumDaemonSet triggers a rollout restart of the Cilium DS installed
// as part of the default Cilium installation.
func (c *RetrierClient) RolloutRestartCiliumDaemonSet(ctx context.Context, cluster *types.Cluster) error {
return c.retrier.Retry(
func() error {
return c.client.RolloutRestartDaemonSet(ctx, DaemonSetName, namespace, cluster.KubeconfigFile)
},
)
}
func (c *RetrierClient) checkCiliumDaemonSetReady(ctx context.Context, cluster *types.Cluster) error {
daemonSet, err := c.client.GetDaemonSet(ctx, DaemonSetName, namespace, cluster.KubeconfigFile)
if err != nil {
return err
}
if err := CheckDaemonSetReady(daemonSet); err != nil {
return err
}
return nil
}
// WaitForCiliumDeployment blocks until the Cilium Deployment installed as part of the default
// Cilium installation becomes ready or until the timeout expires.
func (c *RetrierClient) WaitForCiliumDeployment(ctx context.Context, cluster *types.Cluster) error {
return c.retrier.Retry(
func() error {
return c.checkCiliumDeploymentReady(ctx, cluster)
},
)
}
func (c *RetrierClient) checkCiliumDeploymentReady(ctx context.Context, cluster *types.Cluster) error {
deployment, err := c.client.GetDeployment(ctx, DeploymentName, namespace, cluster.KubeconfigFile)
if err != nil {
return err
}
if err := CheckDeploymentReady(deployment); err != nil {
return err
}
return nil
}
| 194 |
eks-anywhere | aws | Go | package cilium_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/networking/cilium/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
type retrierTest struct {
*WithT
ctx context.Context
r *cilium.RetrierClient
c *mocks.MockClient
cluster *types.Cluster
}
func newRetrierTest(t *testing.T) *retrierTest {
ctrl := gomock.NewController(t)
c := mocks.NewMockClient(ctrl)
return &retrierTest{
WithT: NewWithT(t),
ctx: context.Background(),
r: cilium.NewRetrier(c),
c: c,
cluster: &types.Cluster{
KubeconfigFile: "kubeconfig",
},
}
}
func TestRetrierClientApplySuccess(t *testing.T) {
tt := newRetrierTest(t)
data := []byte("data")
tt.c.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(errors.New("error in apply")).Times(5)
tt.c.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(nil).Times(1)
tt.Expect(tt.r.Apply(tt.ctx, tt.cluster, data)).To(Succeed(), "retrierClient.apply() should succeed after 6 tries")
}
func TestRetrierClientApplyError(t *testing.T) {
tt := newRetrierTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
data := []byte("data")
tt.c.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(errors.New("error in apply")).Times(5)
tt.c.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(nil).AnyTimes()
tt.Expect(tt.r.Apply(tt.ctx, tt.cluster, data)).To(MatchError(ContainSubstring("error in apply")), "retrierClient.apply() should fail after 5 tries")
}
func TestRetrierClientDeleteSuccess(t *testing.T) {
tt := newRetrierTest(t)
data := []byte("data")
tt.c.EXPECT().DeleteKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(errors.New("error in delete")).Times(5)
tt.c.EXPECT().DeleteKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(nil).Times(1)
tt.Expect(tt.r.Delete(tt.ctx, tt.cluster, data)).To(Succeed(), "retrierClient.Delete() should succeed after 6 tries")
}
func TestRetrierClientDeleteError(t *testing.T) {
tt := newRetrierTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
data := []byte("data")
tt.c.EXPECT().DeleteKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(errors.New("error in delete")).Times(5)
tt.c.EXPECT().DeleteKubeSpecFromBytes(tt.ctx, tt.cluster, data).Return(nil).AnyTimes()
tt.Expect(tt.r.Delete(tt.ctx, tt.cluster, data)).To(MatchError(ContainSubstring("error in delete")), "retrierClient.Delete() should fail after 5 tries")
}
type waitForCiliumTest struct {
*retrierTest
ciliumDaemonSet, preflightDaemonSet *v1.DaemonSet
ciliumDeployment, preflightDeployment *v1.Deployment
}
func newWaitForCiliumTest(t *testing.T) *waitForCiliumTest {
return &waitForCiliumTest{
retrierTest: newRetrierTest(t),
ciliumDaemonSet: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
preflightDaemonSet: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds-pre",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
ciliumDeployment: &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "dep",
},
Status: v1.DeploymentStatus{
Replicas: 5,
ReadyReplicas: 5,
},
},
preflightDeployment: &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "dep-pre",
},
Status: v1.DeploymentStatus{
Replicas: 5,
ReadyReplicas: 5,
},
},
}
}
func TestRetrierClientWaitForPreflightDaemonSetSuccess(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(tt.ciliumDaemonSet, nil)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium-pre-flight-check", "kube-system", tt.cluster.KubeconfigFile).Return(tt.preflightDaemonSet, nil)
tt.Expect(tt.r.WaitForPreflightDaemonSet(tt.ctx, tt.cluster)).To(Succeed(), "retrierClient.waitForPreflightDaemonSet() should succeed after 6 tries")
}
func TestRetrierClientWaitForPreflightDaemonSetError(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(tt.ciliumDaemonSet, nil).AnyTimes()
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium-pre-flight-check", "kube-system", tt.cluster.KubeconfigFile).Return(tt.preflightDaemonSet, nil).AnyTimes()
tt.Expect(tt.r.WaitForPreflightDaemonSet(tt.ctx, tt.cluster)).To(MatchError(ContainSubstring("error in get")), "retrierClient.waitForPreflightDaemonSet() should fail after 5 tries")
}
func TestRetrierClientRolloutRestartDaemonSetSuccess(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.c.EXPECT().RolloutRestartDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(errors.New("error in rollout")).Times(5)
tt.c.EXPECT().RolloutRestartDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(nil)
tt.Expect(tt.r.RolloutRestartCiliumDaemonSet(tt.ctx, tt.cluster)).To(Succeed(), "retrierClient.RolloutRestartDaemonSet() should succeed after 6 tries")
}
func TestRetrierClientRolloutRestartDaemonSetError(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
tt.c.EXPECT().RolloutRestartDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(errors.New("error in rollout")).Times(5)
tt.c.EXPECT().RolloutRestartDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(nil).AnyTimes()
tt.Expect(tt.r.RolloutRestartCiliumDaemonSet(tt.ctx, tt.cluster)).To(MatchError(ContainSubstring("error in rollout")), "retrierClient.RolloutRestartCiliumDaemonSet() should fail after 5 tries")
}
func TestRetrierClientWaitForPreflightDeploymentSuccess(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-pre-flight-check", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-pre-flight-check", "kube-system", tt.cluster.KubeconfigFile).Return(tt.preflightDeployment, nil)
tt.Expect(tt.r.WaitForPreflightDeployment(tt.ctx, tt.cluster)).To(Succeed(), "retrierClient.waitForPreflightDeployment() should succeed after 6 tries")
}
func TestRetrierClientWaitForPreflightDeploymentError(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-pre-flight-check", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-pre-flight-check", "kube-system", tt.cluster.KubeconfigFile).Return(tt.preflightDeployment, nil).AnyTimes()
tt.Expect(tt.r.WaitForPreflightDeployment(tt.ctx, tt.cluster)).To(MatchError(ContainSubstring("error in get")), "retrierClient.waitForPreflightDeployment() should fail after 5 tries")
}
func TestRetrierClientWaitForCiliumDaemonSetSuccess(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(tt.ciliumDaemonSet, nil)
tt.Expect(tt.r.WaitForCiliumDaemonSet(tt.ctx, tt.cluster)).To(Succeed(), "retrierClient.waitForCiliumDaemonSet() should succeed after 6 tries")
}
func TestRetrierClientWaitForCiliumDaemonSetError(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDaemonSet(tt.ctx, "cilium", "kube-system", tt.cluster.KubeconfigFile).Return(tt.ciliumDaemonSet, nil).AnyTimes()
tt.Expect(tt.r.WaitForCiliumDaemonSet(tt.ctx, tt.cluster)).To(MatchError(ContainSubstring("error in get")), "retrierClient.waitForCiliumDaemonSet() should fail after 5 tries")
}
func TestRetrierClientWaitForCiliumDeploymentSuccess(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-operator", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-operator", "kube-system", tt.cluster.KubeconfigFile).Return(tt.ciliumDeployment, nil)
tt.Expect(tt.r.WaitForCiliumDeployment(tt.ctx, tt.cluster)).To(Succeed(), "retrierClient.waitForCiliumDeployment() should succeed after 6 tries")
}
func TestRetrierClientWaitForCiliumDeploymentError(t *testing.T) {
tt := newWaitForCiliumTest(t)
tt.r = cilium.NewRetrier(tt.c, cilium.RetrierClientRetrier(retrier.NewWithMaxRetries(5, 0)))
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-operator", "kube-system", tt.cluster.KubeconfigFile).Return(nil, errors.New("error in get")).Times(5)
tt.c.EXPECT().GetDeployment(tt.ctx, "cilium-operator", "kube-system", tt.cluster.KubeconfigFile).Return(tt.ciliumDeployment, nil).AnyTimes()
tt.Expect(tt.r.WaitForCiliumDeployment(tt.ctx, tt.cluster)).To(MatchError(ContainSubstring("error in get")), "retrierClient.waitForCiliumDeployment() should fail after 5 tries")
}
| 213 |
eks-anywhere | aws | Go | package cilium
import "github.com/aws/eks-anywhere/pkg/cluster"
// Config defines a Cilium installation for an EKS-A cluster.
type Config struct {
// AllowedNamespaces defines the k8s namespaces from/to which traffic is allowed
// when PolicyEnforcementMode is Always. For other values of PolicyEnforcementMode
// it is ignored.
AllowedNamespaces []string
// Spec is the complete EKS-A cluster definition
Spec *cluster.Spec
// TODO(gaslor): we should try to reduce the dependency here and narrow it down
// to the bare minimum. This requires refactoring the templater to not depend on the
// cluster spec.
}
| 18 |
eks-anywhere | aws | Go | package cilium
import (
"context"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/constants"
)
const (
ciliumConfigMapName = "cilium-config"
ciliumConfigNamespace = "kube-system"
)
// Installation is an installation of EKSA Cilium components.
type Installation struct {
DaemonSet *appsv1.DaemonSet
Operator *appsv1.Deployment
ConfigMap *corev1.ConfigMap
}
// Installed determines if all EKS-A Embedded Cilium components are present. It identifies
// EKS-A Embedded Cilium by the image name. If the ConfigMap doesn't exist, we still consider
// Cilium installed: the installation might not be complete, but it can be functional.
func (i Installation) Installed() bool {
var isEKSACilium bool
if i.DaemonSet != nil {
for _, c := range i.DaemonSet.Spec.Template.Spec.Containers {
isEKSACilium = isEKSACilium || strings.Contains(c.Image, "eksa")
}
}
return i.DaemonSet != nil && i.Operator != nil && isEKSACilium
}
// GetInstallation creates a new Installation instance. The returned installation's DaemonSet,
// Operator and ConfigMap fields will be nil if they could not be found within the target cluster.
func GetInstallation(ctx context.Context, client client.Client) (*Installation, error) {
ds, err := getDaemonSet(ctx, client)
if err != nil {
return nil, err
}
operator, err := getDeployment(ctx, client)
if err != nil {
return nil, err
}
cm, err := getConfigMap(ctx, client, ciliumConfigMapName, ciliumConfigNamespace)
if err != nil {
return nil, err
}
return &Installation{
DaemonSet: ds,
Operator: operator,
ConfigMap: cm,
}, nil
}
func getDaemonSet(ctx context.Context, client client.Client) (*appsv1.DaemonSet, error) {
ds := &appsv1.DaemonSet{}
err := client.Get(ctx, types.NamespacedName{Name: DaemonSetName, Namespace: constants.KubeSystemNamespace}, ds)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return ds, nil
}
func getConfigMap(ctx context.Context, client client.Client, name string, namespace string) (*corev1.ConfigMap, error) {
c := &corev1.ConfigMap{}
err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, c)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return c, nil
}
func getDeployment(ctx context.Context, client client.Client) (*appsv1.Deployment, error) {
deployment := &appsv1.Deployment{}
key := types.NamespacedName{
Name: DeploymentName,
Namespace: constants.KubeSystemNamespace,
}
err := client.Get(ctx, key, deployment)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return deployment, nil
}
| 108 |
eks-anywhere | aws | Go | package cilium_test
import (
"testing"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
)
func TestInstallationInstalled(t *testing.T) {
tests := []struct {
name string
installation cilium.Installation
want bool
}{
{
name: "installed",
installation: cilium.Installation{
DaemonSet: &appsv1.DaemonSet{
Spec: appsv1.DaemonSetSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "cilium-eksa"},
},
},
},
},
},
Operator: &appsv1.Deployment{},
},
want: true,
},
{
name: "ds not installed",
installation: cilium.Installation{
Operator: &appsv1.Deployment{},
},
want: false,
},
{
name: "ds not installed with eksa cilium",
installation: cilium.Installation{
DaemonSet: &appsv1.DaemonSet{
Spec: appsv1.DaemonSetSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "cilium"},
},
},
},
},
},
Operator: &appsv1.Deployment{},
},
want: false,
},
{
name: "operator not installed",
installation: cilium.Installation{
DaemonSet: &appsv1.DaemonSet{},
},
want: false,
},
{
name: "none installed",
installation: cilium.Installation{},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.installation.Installed()).To(Equal(tt.want))
})
}
}
| 82 |
eks-anywhere | aws | Go | package cilium
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/types"
)
// InstallerForSpec allows configuring Cilium for a particular EKS-A cluster.
// It's a stateful version of Installer, with a fixed Cilium config.
type InstallerForSpec struct {
installer Installer
config Config
}
// NewInstallerForSpec constructs a new InstallerForSpec.
func NewInstallerForSpec(client KubernetesClient, templater InstallTemplater, config Config) *InstallerForSpec {
return &InstallerForSpec{
installer: *NewInstaller(client, templater),
config: config,
}
}
// Install installs Cilium in a cluster.
func (i *InstallerForSpec) Install(ctx context.Context, cluster *types.Cluster) error {
return i.installer.Install(ctx, cluster, i.config.Spec, i.config.AllowedNamespaces)
}
// InstallTemplater generates a Cilium manifest for installation.
type InstallTemplater interface {
GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...ManifestOpt) ([]byte, error)
}
// Installer allows configuring Cilium in a cluster.
type Installer struct {
templater InstallTemplater
k8s KubernetesClient
}
// NewInstaller constructs a new Installer.
func NewInstaller(client KubernetesClient, templater InstallTemplater) *Installer {
return &Installer{
templater: templater,
k8s: client,
}
}
// Install configures Cilium in an EKS-A cluster.
func (i *Installer) Install(ctx context.Context, cluster *types.Cluster, spec *cluster.Spec, allowedNamespaces []string) error {
manifest, err := i.templater.GenerateManifest(ctx,
spec,
WithPolicyAllowedNamespaces(allowedNamespaces),
)
if err != nil {
return fmt.Errorf("generating Cilium manifest for install: %v", err)
}
if err = i.k8s.Apply(ctx, cluster, manifest); err != nil {
return fmt.Errorf("applying Cilium manifest for install: %v", err)
}
return nil
}
| 66 |
eks-anywhere | aws | Go | package cilium_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
)
func TestInstallerInstallErrorGeneratingManifest(t *testing.T) {
tt := newCiliumTest(t)
installer := cilium.NewInstaller(tt.client, tt.installTemplater)
tt.installTemplater.EXPECT().GenerateManifest(
tt.ctx, tt.spec, gomock.Not(gomock.Nil()),
).Return(nil, errors.New("generating manifest"))
tt.Expect(
installer.Install(tt.ctx, tt.cluster, tt.spec, nil),
).To(
MatchError(ContainSubstring("generating Cilium manifest for install: generating manifest")),
)
}
func TestInstallerInstallErrorApplyingManifest(t *testing.T) {
tt := newCiliumTest(t)
installer := cilium.NewInstaller(tt.client, tt.installTemplater)
tt.installTemplater.EXPECT().GenerateManifest(
tt.ctx, tt.spec, gomock.Not(gomock.Nil()),
).Return(tt.ciliumValues, nil)
tt.client.EXPECT().Apply(
tt.ctx, tt.cluster, tt.ciliumValues,
).Return(errors.New("applying"))
tt.Expect(
installer.Install(tt.ctx, tt.cluster, tt.spec, nil),
).To(
MatchError(ContainSubstring("applying Cilium manifest for install: applying")),
)
}
func TestInstallerInstallSuccess(t *testing.T) {
tt := newCiliumTest(t)
installer := cilium.NewInstaller(tt.client, tt.installTemplater)
tt.installTemplater.EXPECT().GenerateManifest(
tt.ctx, tt.spec, gomock.Not(gomock.Nil()),
).Return(tt.ciliumValues, nil)
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.ciliumValues)
tt.Expect(
installer.Install(tt.ctx, tt.cluster, tt.spec, nil),
).To(Succeed())
}
func TestInstallForSpecInstallSuccess(t *testing.T) {
tt := newCiliumTest(t)
config := cilium.Config{
Spec: tt.spec,
AllowedNamespaces: []string{"my-namespace"},
}
installer := cilium.NewInstallerForSpec(tt.client, tt.installTemplater, config)
tt.installTemplater.EXPECT().GenerateManifest(
tt.ctx, tt.spec, gomock.Not(gomock.Nil()),
).Return(tt.ciliumValues, nil)
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.ciliumValues)
tt.Expect(
installer.Install(tt.ctx, tt.cluster),
).To(Succeed())
}
| 76 |
eks-anywhere | aws | Go | package cilium
import (
"fmt"
v1 "k8s.io/api/apps/v1"
)
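// CheckDaemonSetReady returns an error if the DaemonSet's status hasn't caught up
// with its generation or not all desired pods are ready.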
func CheckDaemonSetReady(daemonSet *v1.DaemonSet) error {
if err := checkDaemonSetObservedGeneration(daemonSet); err != nil {
return err
}
if daemonSet.Status.DesiredNumberScheduled != daemonSet.Status.NumberReady {
return fmt.Errorf("daemonSet %s is not ready: %d/%d ready", daemonSet.Name, daemonSet.Status.NumberReady, daemonSet.Status.DesiredNumberScheduled)
}
return nil
}
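// CheckPreflightDaemonSetReady returns an error if either DaemonSet's status is stale
// or the preflight DS doesn't have as many ready pods as the Cilium DS.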
func CheckPreflightDaemonSetReady(ciliumDaemonSet, preflightDaemonSet *v1.DaemonSet) error {
if err := checkDaemonSetObservedGeneration(ciliumDaemonSet); err != nil {
return err
}
if err := checkDaemonSetObservedGeneration(preflightDaemonSet); err != nil {
return err
}
if ciliumDaemonSet.Status.NumberReady != preflightDaemonSet.Status.NumberReady {
return fmt.Errorf("cilium preflight check DS is not ready: %d want and %d ready", ciliumDaemonSet.Status.NumberReady, preflightDaemonSet.Status.NumberReady)
}
return nil
}
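// CheckDeploymentReady returns an error if the Deployment's status is stale or
// not all replicas are ready.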
func CheckDeploymentReady(deployment *v1.Deployment) error {
if err := checkDeploymentObservedGeneration(deployment); err != nil {
return err
}
if deployment.Status.Replicas != deployment.Status.ReadyReplicas {
return fmt.Errorf("deployment %s is not ready: %d/%d ready", deployment.Name, deployment.Status.ReadyReplicas, deployment.Status.Replicas)
}
return nil
}
func checkDaemonSetObservedGeneration(daemonSet *v1.DaemonSet) error {
observedGeneration := daemonSet.Status.ObservedGeneration
generation := daemonSet.Generation
if observedGeneration != generation {
return fmt.Errorf("daemonSet %s status needs to be refreshed: observed generation is %d, want %d", daemonSet.Name, observedGeneration, generation)
}
return nil
}
func checkDeploymentObservedGeneration(deployment *v1.Deployment) error {
observedGeneration := deployment.Status.ObservedGeneration
generation := deployment.Generation
if observedGeneration != generation {
return fmt.Errorf("deployment %s status needs to be refreshed: observed generation is %d, want %d", deployment.Name, observedGeneration, generation)
}
return nil
}
| 64 |
eks-anywhere | aws | Go | package cilium
import (
"errors"
"testing"
. "github.com/onsi/gomega"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestCheckDaemonSetReady(t *testing.T) {
tests := []struct {
name string
daemonSet *v1.DaemonSet
wantErr error
}{
{
name: "old status",
daemonSet: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
Generation: 2,
},
Status: v1.DaemonSetStatus{
ObservedGeneration: 1,
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
wantErr: errors.New("daemonSet ds status needs to be refreshed: observed generation is 1, want 2"),
},
{
name: "ready",
daemonSet: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
},
{
name: "not ready",
daemonSet: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 4,
},
},
wantErr: errors.New("daemonSet ds is not ready: 4/5 ready"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := CheckDaemonSetReady(tt.daemonSet)
if tt.wantErr != nil {
g.Expect(err).To(MatchError(tt.wantErr))
return
}
g.Expect(err).To(Succeed())
})
}
}
func TestCheckPreflightDaemonSetReady(t *testing.T) {
tests := []struct {
name string
cilium, preflight *v1.DaemonSet
wantErr error
}{
{
name: "cilium old status",
cilium: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
Generation: 2,
},
Status: v1.DaemonSetStatus{
ObservedGeneration: 1,
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
preflight: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds-pre",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
wantErr: errors.New("daemonSet ds status needs to be refreshed: observed generation is 1, want 2"),
},
{
name: "pre-check old status",
cilium: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
preflight: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds-pre",
Generation: 2,
},
Status: v1.DaemonSetStatus{
ObservedGeneration: 1,
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
wantErr: errors.New("daemonSet ds-pre status needs to be refreshed: observed generation is 1, want 2"),
},
{
name: "ready",
cilium: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
preflight: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds-pre",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
},
{
name: "not ready",
cilium: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 5,
},
},
preflight: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds-pre",
},
Status: v1.DaemonSetStatus{
DesiredNumberScheduled: 5,
NumberReady: 4,
},
},
wantErr: errors.New("cilium preflight check DS is not ready: 5 want and 4 ready"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := CheckPreflightDaemonSetReady(tt.cilium, tt.preflight)
if tt.wantErr != nil {
g.Expect(err).To(MatchError(tt.wantErr))
return
}
g.Expect(err).To(Succeed())
})
}
}
func TestCheckDeploymentReady(t *testing.T) {
tests := []struct {
name string
deployment *v1.Deployment
wantErr error
}{
{
name: "old status",
deployment: &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "dep",
Generation: 2,
},
Status: v1.DeploymentStatus{
Replicas: 5,
ReadyReplicas: 5,
ObservedGeneration: 1,
},
},
wantErr: errors.New("deployment dep status needs to be refreshed: observed generation is 1, want 2"),
},
{
name: "ready",
deployment: &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "dep",
},
Status: v1.DeploymentStatus{
Replicas: 5,
ReadyReplicas: 5,
},
},
},
{
name: "not ready",
deployment: &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "dep",
},
Status: v1.DeploymentStatus{
Replicas: 5,
ReadyReplicas: 4,
},
},
wantErr: errors.New("deployment dep is not ready: 4/5 ready"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := CheckDeploymentReady(tt.deployment)
if tt.wantErr != nil {
g.Expect(err).To(MatchError(tt.wantErr))
return
}
g.Expect(err).To(Succeed())
})
}
}
| 242 |
eks-anywhere | aws | Go | package cilium
import (
"context"
_ "embed"
"fmt"
"net"
"strings"
"time"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/templater"
)
//go:embed network_policy.yaml
var networkPolicyAllowAll string
const (
maxRetries = 10
defaultBackOffPeriod = 5 * time.Second
)
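// Helm is a client able to template Helm charts and authenticate to OCI registries.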
type Helm interface {
Template(ctx context.Context, ociURI, version, namespace string, values interface{}, kubeVersion string) ([]byte, error)
RegistryLogin(ctx context.Context, registry, username, password string) error
}
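// Templater generates Cilium manifests from the EKS-A Cilium Helm chart.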
type Templater struct {
helm Helm
}
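// NewTemplater constructs a new Templater.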
func NewTemplater(helm Helm) *Templater {
return &Templater{
helm: helm,
}
}
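// GenerateUpgradePreflightManifest generates the manifest for the Cilium pre-flight
// check components installed before an upgrade.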
func (t *Templater) GenerateUpgradePreflightManifest(ctx context.Context, spec *cluster.Spec) ([]byte, error) {
v := templateValues(spec)
v.set(true, "preflight", "enabled")
v.set(spec.VersionsBundle.Cilium.Cilium.Image(), "preflight", "image", "repository")
v.set(spec.VersionsBundle.Cilium.Cilium.Tag(), "preflight", "image", "tag")
v.set(false, "agent")
v.set(false, "operator", "enabled")
uri, version := getChartUriAndVersion(spec)
kubeVersion, err := getKubeVersionString(spec)
if err != nil {
return nil, err
}
manifest, err := t.helm.Template(ctx, uri, version, namespace, v, kubeVersion)
if err != nil {
return nil, fmt.Errorf("failed generating cilium upgrade preflight manifest: %v", err)
}
return manifest, nil
}
// ManifestOpt allows modifying options for a Cilium manifest.
type ManifestOpt func(*ManifestConfig)
type ManifestConfig struct {
values values
retrier *retrier.Retrier
kubeVersion string
namespaces []string
}
// WithKubeVersion allows generating the Cilium manifest for a different kubernetes version
// than the one specified in the cluster spec. Useful for upgrade scenarios where Cilium is upgraded before
// the kubernetes components.
func WithKubeVersion(kubeVersion string) ManifestOpt {
return func(c *ManifestConfig) {
c.kubeVersion = kubeVersion
}
}
// WithRetrier allows using a custom retrier; introduced for optimizing unit tests.
func WithRetrier(retrier *retrier.Retrier) ManifestOpt {
return func(c *ManifestConfig) {
c.retrier = retrier
}
}
// WithUpgradeFromVersion allows specifying the Cilium version to preserve compatibility with in the manifest.
// This is necessary for Cilium upgrades.
func WithUpgradeFromVersion(version semver.Version) ManifestOpt {
return func(c *ManifestConfig) {
c.values.set(fmt.Sprintf("%d.%d", version.Major, version.Minor), "upgradeCompatibility")
}
}
// WithPolicyAllowedNamespaces allows specifying which namespaces traffic should be allowed from/to when using
// the "Always" policy enforcement mode.
func WithPolicyAllowedNamespaces(namespaces []string) ManifestOpt {
return func(c *ManifestConfig) {
c.namespaces = namespaces
}
}
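// GenerateManifest generates the Cilium installation manifest by templating the Helm chart
// with values built from the cluster spec and the given options. When the policy enforcement
// mode is Always, the allow-all network policy manifest is appended.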
func (t *Templater) GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...ManifestOpt) ([]byte, error) {
kubeVersion, err := getKubeVersionString(spec)
if err != nil {
return nil, err
}
c := &ManifestConfig{
values: templateValues(spec),
kubeVersion: kubeVersion,
retrier: retrier.NewWithMaxRetries(maxRetries, defaultBackOffPeriod),
}
for _, o := range opts {
o(c)
}
uri, version := getChartUriAndVersion(spec)
var manifest []byte
if spec.Cluster.Spec.RegistryMirrorConfiguration != nil {
if spec.Cluster.Spec.RegistryMirrorConfiguration.Authenticate {
username, password, err := config.ReadCredentials()
if err != nil {
return nil, err
}
endpoint := net.JoinHostPort(spec.Cluster.Spec.RegistryMirrorConfiguration.Endpoint, spec.Cluster.Spec.RegistryMirrorConfiguration.Port)
if err := t.helm.RegistryLogin(ctx, endpoint, username, password); err != nil {
return nil, err
}
}
}
err = c.retrier.Retry(func() error {
manifest, err = t.helm.Template(ctx, uri, version, namespace, c.values, c.kubeVersion)
return err
})
if err != nil {
return nil, fmt.Errorf("failed generating cilium manifest: %v", err)
}
if spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode == anywherev1.CiliumPolicyModeAlways {
networkPolicyManifest, err := t.GenerateNetworkPolicyManifest(spec, c.namespaces)
if err != nil {
return nil, err
}
manifest = templater.AppendYamlResources(manifest, networkPolicyManifest)
}
return manifest, nil
}
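// GenerateNetworkPolicyManifest renders the allow-all network policy applied when the
// policy enforcement mode is Always.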
func (t *Templater) GenerateNetworkPolicyManifest(spec *cluster.Spec, namespaces []string) ([]byte, error) {
values := map[string]interface{}{
"managementCluster": spec.Cluster.IsSelfManaged(),
"providerNamespaces": namespaces,
}
if spec.Cluster.Spec.GitOpsRef != nil {
values["gitopsEnabled"] = true
if spec.GitOpsConfig != nil {
values["fluxNamespace"] = spec.GitOpsConfig.Spec.Flux.Github.FluxSystemNamespace
}
}
return templater.Execute(networkPolicyAllowAll, values)
}
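// values is a helper to build nested Helm values. Its set method creates intermediate
// maps as needed; for example, v.set("geneve", "tunnel") is equivalent to
// v["tunnel"] = "geneve", while v.set(tag, "image", "tag") creates the nested
// "image" map if it doesn't exist yet.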
type values map[string]interface{}
func (c values) set(value interface{}, path ...string) {
element := c
for _, p := range path[:len(path)-1] {
e, ok := element[p]
if !ok {
e = values{}
element[p] = e
}
element = e.(values)
}
element[path[len(path)-1]] = value
}
func templateValues(spec *cluster.Spec) values {
val := values{
"cni": values{
"chainingMode": "portmap",
},
"ipam": values{
"mode": "kubernetes",
},
"identityAllocationMode": "crd",
"prometheus": values{
"enabled": true,
},
"rollOutCiliumPods": true,
"tunnel": "geneve",
"image": values{
"repository": spec.VersionsBundle.Cilium.Cilium.Image(),
"tag": spec.VersionsBundle.Cilium.Cilium.Tag(),
},
"operator": values{
"image": values{
// The chart expects an "incomplete" repository
// and will add the necessary suffix ("-generic" in our case)
"repository": strings.TrimSuffix(spec.VersionsBundle.Cilium.Operator.Image(), "-generic"),
"tag": spec.VersionsBundle.Cilium.Operator.Tag(),
},
"prometheus": values{
"enabled": true,
},
},
}
if len(spec.Cluster.Spec.WorkerNodeGroupConfigurations) == 0 && spec.Cluster.Spec.ControlPlaneConfiguration.Count == 1 {
val["operator"].(values)["replicas"] = 1
}
if spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode != "" {
val["policyEnforcementMode"] = spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode
}
if spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces != "" {
val["egressMasqueradeInterfaces"] = spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces
}
return val
}
func getChartUriAndVersion(spec *cluster.Spec) (uri, version string) {
chart := spec.VersionsBundle.Cilium.HelmChart
uri = fmt.Sprintf("oci://%s", chart.Image())
version = chart.Tag()
return uri, version
}
func getKubeVersion(spec *cluster.Spec) (*semver.Version, error) {
k8sVersion, err := semver.New(spec.VersionsBundle.KubeDistro.Kubernetes.Tag)
if err != nil {
return nil, fmt.Errorf("parsing kubernetes version %v: %v", spec.Cluster.Spec.KubernetesVersion, err)
}
return k8sVersion, nil
}
func getKubeVersionString(spec *cluster.Spec) (string, error) {
k8sVersion, err := getKubeVersion(spec)
if err != nil {
return "", err
}
return fmt.Sprintf("%d.%d", k8sVersion.Major, k8sVersion.Minor), nil
}
| 256 |
eks-anywhere | aws | Go | package cilium_test
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"reflect"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/networking/cilium/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/semver"
)
type templaterTest struct {
*WithT
ctx context.Context
t *cilium.Templater
h *mocks.MockHelm
manifest []byte
uri, version, namespace string
spec, currentSpec *cluster.Spec
}
func newtemplaterTest(t *testing.T) *templaterTest {
ctrl := gomock.NewController(t)
h := mocks.NewMockHelm(ctrl)
return &templaterTest{
WithT: NewWithT(t),
ctx: context.Background(),
h: h,
t: cilium.NewTemplater(h),
manifest: []byte("manifestContent"),
uri: "oci://public.ecr.aws/isovalent/cilium",
version: "1.9.11-eksa.1",
namespace: "kube-system",
currentSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Version = "v1.9.10-eksa.1"
s.VersionsBundle.Cilium.Cilium.URI = "public.ecr.aws/isovalent/cilium:v1.9.10-eksa.1"
s.VersionsBundle.Cilium.Operator.URI = "public.ecr.aws/isovalent/operator-generic:v1.9.10-eksa.1"
s.VersionsBundle.Cilium.HelmChart.URI = "public.ecr.aws/isovalent/cilium:1.9.10-eksa.1"
s.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1.22.5-eks-1-22-9"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}}
}),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Version = "v1.9.11-eksa.1"
s.VersionsBundle.Cilium.Cilium.URI = "public.ecr.aws/isovalent/cilium:v1.9.11-eksa.1"
s.VersionsBundle.Cilium.Operator.URI = "public.ecr.aws/isovalent/operator-generic:v1.9.11-eksa.1"
s.VersionsBundle.Cilium.HelmChart.URI = "public.ecr.aws/isovalent/cilium:1.9.11-eksa.1"
s.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1.22.5-eks-1-22-9"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}}
}),
}
}
func (t *templaterTest) expectHelmTemplateWith(wantValues gomock.Matcher, kubeVersion string) *gomock.Call {
return t.h.EXPECT().Template(t.ctx, t.uri, t.version, t.namespace, wantValues, kubeVersion)
}
func eqMap(m map[string]interface{}) gomock.Matcher {
return &mapMatcher{m: m}
}
// mapMatcher implements a gomock matcher for maps.
// It transforms any map or struct into a map[string]interface{} and uses DeepEqual to compare.
type mapMatcher struct {
m map[string]interface{}
}
func (e *mapMatcher) Matches(x interface{}) bool {
xJson, err := json.Marshal(x)
if err != nil {
return false
}
xMap := &map[string]interface{}{}
err = json.Unmarshal(xJson, xMap)
if err != nil {
return false
}
return reflect.DeepEqual(e.m, *xMap)
}
func (e *mapMatcher) String() string {
return fmt.Sprintf("matches map %v", e.m)
}
func TestTemplaterGenerateUpgradePreflightManifestSuccess(t *testing.T) {
wantValues := map[string]interface{}{
"cni": map[string]interface{}{
"chainingMode": "portmap",
},
"ipam": map[string]interface{}{
"mode": "kubernetes",
},
"identityAllocationMode": "crd",
"prometheus": map[string]interface{}{
"enabled": true,
},
"rollOutCiliumPods": true,
"tunnel": "geneve",
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/cilium",
"tag": "v1.9.11-eksa.1",
},
"operator": map[string]interface{}{
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/operator",
"tag": "v1.9.11-eksa.1",
},
"prometheus": map[string]interface{}{
"enabled": true,
},
"enabled": false,
},
"preflight": map[string]interface{}{
"enabled": true,
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/cilium",
"tag": "v1.9.11-eksa.1",
},
},
"agent": false,
}
tt := newtemplaterTest(t)
tt.expectHelmTemplateWith(eqMap(wantValues), "1.22").Return(tt.manifest, nil)
tt.Expect(tt.t.GenerateUpgradePreflightManifest(tt.ctx, tt.spec)).To(Equal(tt.manifest), "templater.GenerateUpgradePreflightManifest() should return right manifest")
}
func TestTemplaterGenerateUpgradePreflightManifestError(t *testing.T) {
tt := newtemplaterTest(t)
tt.expectHelmTemplateWith(gomock.Any(), "1.22").Return(nil, errors.New("error from helm")) // Using any because we only want to test the returned error
_, err := tt.t.GenerateUpgradePreflightManifest(tt.ctx, tt.spec)
tt.Expect(err).To(HaveOccurred(), "templater.GenerateUpgradePreflightManifest() should fail")
tt.Expect(err).To(MatchError(ContainSubstring("error from helm")))
}
func TestTemplaterGenerateUpgradePreflightManifestInvalidKubeVersion(t *testing.T) {
tt := newtemplaterTest(t)
tt.spec.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1-invalid"
_, err := tt.t.GenerateUpgradePreflightManifest(tt.ctx, tt.spec)
tt.Expect(err).To(HaveOccurred(), "templater.GenerateUpgradePreflightManifest() should fail")
tt.Expect(err).To(MatchError(ContainSubstring("invalid major version in semver")))
}
func TestTemplaterGenerateManifestSuccess(t *testing.T) {
wantValues := map[string]interface{}{
"cni": map[string]interface{}{
"chainingMode": "portmap",
},
"ipam": map[string]interface{}{
"mode": "kubernetes",
},
"identityAllocationMode": "crd",
"prometheus": map[string]interface{}{
"enabled": true,
},
"rollOutCiliumPods": true,
"tunnel": "geneve",
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/cilium",
"tag": "v1.9.11-eksa.1",
},
"operator": map[string]interface{}{
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/operator",
"tag": "v1.9.11-eksa.1",
},
"prometheus": map[string]interface{}{
"enabled": true,
},
},
}
tt := newtemplaterTest(t)
tt.expectHelmTemplateWith(eqMap(wantValues), "1.22").Return(tt.manifest, nil)
tt.Expect(tt.t.GenerateManifest(tt.ctx, tt.spec)).To(Equal(tt.manifest), "templater.GenerateManifest() should return right manifest")
}
func TestTemplaterGenerateManifestPolicyEnforcementModeSuccess(t *testing.T) {
wantValues := map[string]interface{}{
"cni": map[string]interface{}{
"chainingMode": "portmap",
},
"ipam": map[string]interface{}{
"mode": "kubernetes",
},
"identityAllocationMode": "crd",
"prometheus": map[string]interface{}{
"enabled": true,
},
"rollOutCiliumPods": true,
"tunnel": "geneve",
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/cilium",
"tag": "v1.9.11-eksa.1",
},
"operator": map[string]interface{}{
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/operator",
"tag": "v1.9.11-eksa.1",
},
"prometheus": map[string]interface{}{
"enabled": true,
},
},
"policyEnforcementMode": "always",
}
tt := newtemplaterTest(t)
tt.spec.Cluster.Spec.ManagementCluster.Name = "managed"
tt.spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode = v1alpha1.CiliumPolicyModeAlways
tt.expectHelmTemplateWith(eqMap(wantValues), "1.22").Return(tt.manifest, nil)
gotManifest, err := tt.t.GenerateManifest(tt.ctx, tt.spec)
tt.Expect(err).NotTo(HaveOccurred())
test.AssertContentToFile(t, string(gotManifest), "testdata/manifest_network_policy.yaml")
}
func TestTemplaterGenerateManifestEgressMasqueradeInterfacesSuccess(t *testing.T) {
wantValues := map[string]interface{}{
"cni": map[string]interface{}{
"chainingMode": "portmap",
},
"ipam": map[string]interface{}{
"mode": "kubernetes",
},
"identityAllocationMode": "crd",
"prometheus": map[string]interface{}{
"enabled": true,
},
"rollOutCiliumPods": true,
"tunnel": "geneve",
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/cilium",
"tag": "v1.9.11-eksa.1",
},
"operator": map[string]interface{}{
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/operator",
"tag": "v1.9.11-eksa.1",
},
"prometheus": map[string]interface{}{
"enabled": true,
},
},
"egressMasqueradeInterfaces": "eth0",
}
tt := newtemplaterTest(t)
tt.spec.Cluster.Spec.ManagementCluster.Name = "managed"
tt.spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces = "eth0"
tt.expectHelmTemplateWith(eqMap(wantValues), "1.22").Return(tt.manifest, nil)
tt.Expect(tt.t.GenerateManifest(tt.ctx, tt.spec)).To(Equal(tt.manifest), "templater.GenerateManifest() should return right manifest")
}
func TestTemplaterGenerateManifestError(t *testing.T) {
expectedAttempts := 2
tt := newtemplaterTest(t)
tt.expectHelmTemplateWith(gomock.Any(), "1.22").Return(nil, errors.New("error from helm")).Times(expectedAttempts) // Using any because we only want to test the returned error
_, err := tt.t.GenerateManifest(tt.ctx, tt.spec, cilium.WithRetrier(retrier.NewWithMaxRetries(expectedAttempts, 0)))
tt.Expect(err).To(HaveOccurred(), "templater.GenerateManifest() should fail")
tt.Expect(err).To(MatchError(ContainSubstring("error from helm")))
}
func TestTemplaterGenerateManifestInvalidKubeVersion(t *testing.T) {
tt := newtemplaterTest(t)
tt.spec.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1-invalid"
_, err := tt.t.GenerateManifest(tt.ctx, tt.spec)
tt.Expect(err).To(HaveOccurred(), "templater.GenerateManifest() should fail")
tt.Expect(err).To(MatchError(ContainSubstring("invalid major version in semver")))
}
func TestTemplaterGenerateManifestUpgradeSameKubernetesVersionSuccess(t *testing.T) {
tt := newtemplaterTest(t)
tt.expectHelmTemplateWith(eqMap(wantUpgradeValues()), "1.22").Return(tt.manifest, nil)
oldCiliumVersion, err := semver.New(tt.currentSpec.VersionsBundle.Cilium.Version)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(
tt.t.GenerateManifest(tt.ctx, tt.spec,
cilium.WithUpgradeFromVersion(*oldCiliumVersion),
),
).To(Equal(tt.manifest), "templater.GenerateUpgradeManifest() should return right manifest")
}
func TestTemplaterGenerateManifestUpgradeNewKubernetesVersionSuccess(t *testing.T) {
tt := newtemplaterTest(t)
tt.expectHelmTemplateWith(eqMap(wantUpgradeValues()), "1.21").Return(tt.manifest, nil)
oldCiliumVersion, err := semver.New(tt.currentSpec.VersionsBundle.Cilium.Version)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(
tt.t.GenerateManifest(tt.ctx, tt.spec,
cilium.WithKubeVersion("1.21"),
cilium.WithUpgradeFromVersion(*oldCiliumVersion),
),
).To(Equal(tt.manifest), "templater.GenerateUpgradeManifest() should return right manifest")
}
func wantUpgradeValues() map[string]interface{} {
return map[string]interface{}{
"cni": map[string]interface{}{
"chainingMode": "portmap",
},
"ipam": map[string]interface{}{
"mode": "kubernetes",
},
"identityAllocationMode": "crd",
"prometheus": map[string]interface{}{
"enabled": true,
},
"rollOutCiliumPods": true,
"tunnel": "geneve",
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/cilium",
"tag": "v1.9.11-eksa.1",
},
"operator": map[string]interface{}{
"image": map[string]interface{}{
"repository": "public.ecr.aws/isovalent/operator",
"tag": "v1.9.11-eksa.1",
},
"prometheus": map[string]interface{}{
"enabled": true,
},
},
"upgradeCompatibility": "1.9",
}
}
func TestTemplaterGenerateNetworkPolicy(t *testing.T) {
tests := []struct {
name string
k8sVersion string
selfManaged bool
gitopsEnabled bool
infraProviderNamespaces []string
wantNetworkPolicyFile string
}{
{
name: "CAPV mgmt cluster",
k8sVersion: "v1.21.9-eks-1-21-10",
selfManaged: true,
gitopsEnabled: false,
infraProviderNamespaces: []string{"capv-system"},
wantNetworkPolicyFile: "testdata/network_policy_mgmt_capv.yaml",
},
{
name: "CAPT mgmt cluster with flux",
k8sVersion: "v1.21.9-eks-1-21-10",
selfManaged: true,
gitopsEnabled: true,
infraProviderNamespaces: []string{"capt-system"},
wantNetworkPolicyFile: "testdata/network_policy_mgmt_capt_flux.yaml",
},
{
name: "workload cluster 1.20",
k8sVersion: "v1.20.9-eks-1-20-10",
selfManaged: false,
gitopsEnabled: false,
infraProviderNamespaces: []string{},
wantNetworkPolicyFile: "testdata/network_policy_workload_120.yaml",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
temp := newtemplaterTest(t)
temp.spec.VersionsBundle.KubeDistro.Kubernetes.Tag = tt.k8sVersion
if !tt.selfManaged {
temp.spec.Cluster.Spec.ManagementCluster.Name = "managed"
}
if tt.gitopsEnabled {
temp.spec.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Kind: v1alpha1.FluxConfigKind,
Name: "eksa-unit-test",
}
temp.spec.Config.GitOpsConfig = &v1alpha1.GitOpsConfig{
Spec: v1alpha1.GitOpsConfigSpec{
Flux: v1alpha1.Flux{Github: v1alpha1.Github{FluxSystemNamespace: "flux-system"}},
},
}
}
networkPolicy, err := temp.t.GenerateNetworkPolicyManifest(temp.spec, tt.infraProviderNamespaces)
if err != nil {
t.Fatalf("failed to generate network policy template: %v", err)
}
test.AssertContentToFile(t, string(networkPolicy), tt.wantNetworkPolicyFile)
})
}
}
func TestTemplaterGenerateManifestForSingleNodeCluster(t *testing.T) {
tt := newtemplaterTest(t)
tt.spec.Cluster.Spec.WorkerNodeGroupConfigurations = nil
tt.spec.Cluster.Spec.ControlPlaneConfiguration.Count = 1
tt.h.EXPECT().
Template(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_ interface{}, _ interface{}, _ interface{}, _ interface{}, values map[string]interface{}, _ interface{}) ([]byte, error) {
tt.Expect(reflect.ValueOf(values["operator"]).MapIndex(reflect.ValueOf("replicas")).Interface().(int)).To(Equal(1))
return tt.manifest, nil
})
tt.Expect(tt.t.GenerateManifest(tt.ctx, tt.spec)).To(Equal(tt.manifest), "templater.GenerateManifest() should return right manifest")
}
func TestTemplaterGenerateManifestForRegistryAuth(t *testing.T) {
tt := newtemplaterTest(t)
tt.spec.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
Authenticate: true,
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
{
Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com",
Namespace: "curated-packages",
},
},
}
os.Unsetenv("REGISTRY_USERNAME")
os.Unsetenv("REGISTRY_PASSWORD")
_, err := tt.t.GenerateManifest(tt.ctx, tt.spec)
tt.Expect(err).To(HaveOccurred(), "templater.GenerateManifest() should fail")
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
tt.h.EXPECT().
RegistryLogin(gomock.Any(), "1.2.3.4:443", "username", "password").
Return(nil)
tt.h.EXPECT().
Template(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return(tt.manifest, nil)
tt.Expect(tt.t.GenerateManifest(tt.ctx, tt.spec)).To(Equal(tt.manifest), "templater.GenerateManifest() should return right manifest")
}
| 462 |
eks-anywhere | aws | Go | package cilium
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/types"
)
// KubernetesClient is a client to interact with the Kubernetes API.
type KubernetesClient interface {
Apply(ctx context.Context, cluster *types.Cluster, data []byte) error
Delete(ctx context.Context, cluster *types.Cluster, data []byte) error
WaitForPreflightDaemonSet(ctx context.Context, cluster *types.Cluster) error
WaitForPreflightDeployment(ctx context.Context, cluster *types.Cluster) error
WaitForCiliumDaemonSet(ctx context.Context, cluster *types.Cluster) error
WaitForCiliumDeployment(ctx context.Context, cluster *types.Cluster) error
RolloutRestartCiliumDaemonSet(ctx context.Context, cluster *types.Cluster) error
}
// UpgradeTemplater generates Cilium manifests for upgrades.
type UpgradeTemplater interface {
GenerateUpgradePreflightManifest(ctx context.Context, spec *cluster.Spec) ([]byte, error)
GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...ManifestOpt) ([]byte, error)
}
// Upgrader allows upgrading a Cilium installation in an EKS-A cluster.
type Upgrader struct {
templater UpgradeTemplater
client KubernetesClient
// skipUpgrade indicates Cilium upgrades should be skipped.
skipUpgrade bool
}
// NewUpgrader constructs a new Upgrader.
func NewUpgrader(client KubernetesClient, templater UpgradeTemplater) *Upgrader {
return &Upgrader{
templater: templater,
client: client,
}
}
// Upgrade configures a Cilium installation to match the desired state in the cluster Spec.
func (u *Upgrader) Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec, namespaces []string) (*types.ChangeDiff, error) {
if u.skipUpgrade {
logger.V(1).Info("Cilium upgrade skipped")
return nil, nil
}
diff := ciliumChangeDiff(currentSpec, newSpec)
chartValuesChanged := ciliumHelmChartValuesChanged(currentSpec, newSpec)
if diff == nil && !chartValuesChanged {
logger.V(1).Info("Nothing to upgrade for Cilium, skipping")
return nil, nil
}
if diff != nil {
logger.V(1).Info("Upgrading Cilium", "oldVersion", diff.ComponentReports[0].OldVersion, "newVersion", diff.ComponentReports[0].NewVersion)
}
logger.V(4).Info("Generating Cilium upgrade preflight manifest")
preflight, err := u.templater.GenerateUpgradePreflightManifest(ctx, newSpec)
if err != nil {
return nil, err
}
logger.V(2).Info("Installing Cilium upgrade preflight manifest")
if err := u.client.Apply(ctx, cluster, preflight); err != nil {
return nil, fmt.Errorf("failed applying cilium preflight check: %v", err)
}
logger.V(3).Info("Waiting for Cilium upgrade preflight checks to be up")
if err := u.waitForPreflight(ctx, cluster); err != nil {
return nil, err
}
logger.V(3).Info("Deleting Cilium upgrade preflight")
if err := u.client.Delete(ctx, cluster, preflight); err != nil {
return nil, fmt.Errorf("failed deleting cilium preflight check: %v", err)
}
logger.V(3).Info("Generating Cilium upgrade manifest")
currentKubeVersion, err := getKubeVersionString(currentSpec)
if err != nil {
return nil, err
}
previousCiliumVersion, err := semver.New(currentSpec.VersionsBundle.Cilium.Version)
if err != nil {
return nil, err
}
upgradeManifest, err := u.templater.GenerateManifest(ctx, newSpec,
WithKubeVersion(currentKubeVersion),
WithUpgradeFromVersion(*previousCiliumVersion),
WithPolicyAllowedNamespaces(namespaces),
)
if err != nil {
return nil, err
}
logger.V(2).Info("Installing new Cilium version")
if err := u.client.Apply(ctx, cluster, upgradeManifest); err != nil {
return nil, fmt.Errorf("failed applying cilium upgrade: %v", err)
}
logger.V(3).Info("Waiting for upgraded Cilium to be ready")
if err := u.waitForCilium(ctx, cluster); err != nil {
return nil, err
}
return diff, nil
}
func (u *Upgrader) waitForPreflight(ctx context.Context, cluster *types.Cluster) error {
if err := u.client.WaitForPreflightDaemonSet(ctx, cluster); err != nil {
return err
}
if err := u.client.WaitForPreflightDeployment(ctx, cluster); err != nil {
return err
}
return nil
}
func (u *Upgrader) waitForCilium(ctx context.Context, cluster *types.Cluster) error {
if err := u.client.WaitForCiliumDaemonSet(ctx, cluster); err != nil {
return err
}
if err := u.client.WaitForCiliumDeployment(ctx, cluster); err != nil {
return err
}
return nil
}
func ciliumChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ChangeDiff {
if currentSpec.VersionsBundle.Cilium.Version == newSpec.VersionsBundle.Cilium.Version {
return nil
}
return &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "cilium",
OldVersion: currentSpec.VersionsBundle.Cilium.Version,
NewVersion: newSpec.VersionsBundle.Cilium.Version,
},
},
}
}
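// ChangeDiff returns the version change for the Cilium component between the current
// and new cluster specs, or nil if the version is unchanged.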
func ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ChangeDiff {
return ciliumChangeDiff(currentSpec, newSpec)
}
func ciliumHelmChartValuesChanged(currentSpec, newSpec *cluster.Spec) bool {
if currentSpec.Cluster.Spec.ClusterNetwork.CNIConfig == nil || currentSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium == nil {
// this is for clusters created using 0.7 and lower versions; they won't have these fields initialized.
// In these cases, a non-default PolicyEnforcementMode in the newSpec will be considered a change
if newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode != v1alpha1.CiliumPolicyModeDefault {
return true
}
} else {
if newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode != currentSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode {
return true
}
if newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces != currentSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces {
return true
}
}
// we can add comparisons for more values here as we start accepting them from cluster spec
return false
}
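// RunPostControlPlaneUpgradeSetup restarts the Cilium daemonset after the control plane
// nodes have been upgraded.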
func (u *Upgrader) RunPostControlPlaneUpgradeSetup(ctx context.Context, cluster *types.Cluster) error {
// we need to restart the cilium pods after the control plane VMs get upgraded to prevent the issue seen in https://github.com/aws/eks-anywhere/issues/1888
if err := u.client.RolloutRestartCiliumDaemonSet(ctx, cluster); err != nil {
return fmt.Errorf("restarting cilium daemonset: %v", err)
}
return nil
}
// SetSkipUpgrade configures u to skip the upgrade process.
func (u *Upgrader) SetSkipUpgrade(v bool) {
u.skipUpgrade = v
}
| 195 |
eks-anywhere | aws | Go | package cilium_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/networking/cilium/mocks"
"github.com/aws/eks-anywhere/pkg/types"
)
type upgraderTest struct {
*WithT
ctx context.Context
u *cilium.Upgrader
h *mocks.MockHelm
client *mocks.MockKubernetesClient
manifestPre, manifest []byte
currentSpec, newSpec *cluster.Spec
cluster *types.Cluster
wantChangeDiff *types.ChangeDiff
}
func newUpgraderTest(t *testing.T) *upgraderTest {
ctrl := gomock.NewController(t)
h := mocks.NewMockHelm(ctrl)
client := mocks.NewMockKubernetesClient(ctrl)
u := cilium.NewUpgrader(client, cilium.NewTemplater(h))
return &upgraderTest{
WithT: NewWithT(t),
ctx: context.Background(),
h: h,
client: client,
u: u,
manifest: []byte("manifestContent"),
currentSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Version = "v1.9.10-eksa.1"
s.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1.22.5-eks-1-22-9"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}}
}),
newSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Version = "v1.9.11-eksa.1"
s.VersionsBundle.KubeDistro.Kubernetes.Tag = "v1.22.5-eks-1-22-9"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}}
}),
cluster: &types.Cluster{
KubeconfigFile: "kubeconfig",
},
wantChangeDiff: types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "cilium",
OldVersion: "v1.9.10-eksa.1",
NewVersion: "v1.9.11-eksa.1",
}),
}
}
func (tt *upgraderTest) expectTemplatePreFlight() *gomock.Call {
return tt.expectTemplate(tt.manifestPre)
}
func (tt *upgraderTest) expectTemplateManifest() *gomock.Call {
return tt.expectTemplate(tt.manifest)
}
func (tt *upgraderTest) expectTemplate(manifest []byte) *gomock.Call {
// Using Any because this is already covered in the templater tests
return tt.h.EXPECT().Template(
tt.ctx, gomock.AssignableToTypeOf(""), gomock.AssignableToTypeOf(""), gomock.AssignableToTypeOf(""), gomock.AssignableToTypeOf(map[string]interface{}{}), gomock.AssignableToTypeOf(""),
).Return(manifest, nil)
}
func TestUpgraderUpgradeSuccess(t *testing.T) {
tt := newUpgraderTest(t)
// Templater and client are already tested individually, so we only verify the flow (order of calls)
gomock.InOrder(
tt.expectTemplatePreFlight(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifestPre),
tt.client.EXPECT().WaitForPreflightDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForPreflightDeployment(tt.ctx, tt.cluster),
tt.client.EXPECT().Delete(tt.ctx, tt.cluster, tt.manifestPre),
tt.expectTemplateManifest(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifest),
tt.client.EXPECT().WaitForCiliumDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForCiliumDeployment(tt.ctx, tt.cluster),
)
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(Equal(tt.wantChangeDiff), "upgrader.Upgrade() should succeed and return correct ChangeDiff")
}
func TestUpgraderUpgradeNotNeeded(t *testing.T) {
tt := newUpgraderTest(t)
tt.currentSpec.VersionsBundle.Cilium.Version = "v1.0.0"
tt.newSpec.VersionsBundle.Cilium.Version = "v1.0.0"
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(BeNil(), "upgrader.Upgrade() should succeed and return nil ChangeDiff")
}
func TestUpgraderUpgradeSuccessValuesChanged(t *testing.T) {
tt := newUpgraderTest(t)
tt.currentSpec.VersionsBundle.Cilium.Version = "v1.0.0"
tt.newSpec.VersionsBundle.Cilium.Version = "v1.0.0"
// setting policy enforcement mode to something other than the "default" mode
tt.newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode = v1alpha1.CiliumPolicyModeNever
// Templater and client are already tested individually, so we only verify the flow (order of calls)
gomock.InOrder(
tt.expectTemplatePreFlight(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifestPre),
tt.client.EXPECT().WaitForPreflightDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForPreflightDeployment(tt.ctx, tt.cluster),
tt.client.EXPECT().Delete(tt.ctx, tt.cluster, tt.manifestPre),
tt.expectTemplateManifest(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifest),
tt.client.EXPECT().WaitForCiliumDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForCiliumDeployment(tt.ctx, tt.cluster),
)
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(BeNil(), "upgrader.Upgrade() should succeed and return nil ChangeDiff")
}
func TestUpgraderUpgradeSuccessValuesChangedUpgradeFromNilCNIConfigSpec(t *testing.T) {
tt := newUpgraderTest(t)
tt.currentSpec.VersionsBundle.Cilium.Version = "v1.0.0"
tt.newSpec.VersionsBundle.Cilium.Version = "v1.0.0"
// simulate the case where the existing cluster's CNIConfig is nil
tt.currentSpec.Cluster.Spec.ClusterNetwork.CNIConfig = nil
// setting policy enforcement mode to something other than the "default" mode
tt.newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode = v1alpha1.CiliumPolicyModeNever
// Templater and client are already tested individually, so we only verify the flow (order of calls)
gomock.InOrder(
tt.expectTemplatePreFlight(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifestPre),
tt.client.EXPECT().WaitForPreflightDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForPreflightDeployment(tt.ctx, tt.cluster),
tt.client.EXPECT().Delete(tt.ctx, tt.cluster, tt.manifestPre),
tt.expectTemplateManifest(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifest),
tt.client.EXPECT().WaitForCiliumDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForCiliumDeployment(tt.ctx, tt.cluster),
)
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(BeNil(), "upgrader.Upgrade() should succeed and return nil ChangeDiff")
}
func TestUpgraderUpgradeSuccessValuesChangedUpgradeFromNilCiliumConfigSpec(t *testing.T) {
tt := newUpgraderTest(t)
tt.currentSpec.VersionsBundle.Cilium.Version = "v1.0.0"
tt.newSpec.VersionsBundle.Cilium.Version = "v1.0.0"
// simulate the case where the existing cluster's Cilium config is nil
tt.currentSpec.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{Cilium: nil}
// setting policy enforcement mode to something other than the "default" mode
tt.newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode = v1alpha1.CiliumPolicyModeNever
// Templater and client are already tested individually, so we only verify the flow (order of calls)
gomock.InOrder(
tt.expectTemplatePreFlight(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifestPre),
tt.client.EXPECT().WaitForPreflightDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForPreflightDeployment(tt.ctx, tt.cluster),
tt.client.EXPECT().Delete(tt.ctx, tt.cluster, tt.manifestPre),
tt.expectTemplateManifest(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifest),
tt.client.EXPECT().WaitForCiliumDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForCiliumDeployment(tt.ctx, tt.cluster),
)
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(BeNil(), "upgrader.Upgrade() should succeed and return nil ChangeDiff")
}
func TestUpgraderUpgradeSuccessEgressMasqueradeInterfacesValueChanged(t *testing.T) {
tt := newUpgraderTest(t)
tt.currentSpec.VersionsBundle.Cilium.Version = "v1.0.0"
tt.newSpec.VersionsBundle.Cilium.Version = "v1.0.0"
// setting egress masquerade interfaces to something other than ""
tt.newSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces = "test"
// Templater and client are already tested individually, so we only verify the flow (order of calls)
gomock.InOrder(
tt.expectTemplatePreFlight(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifestPre),
tt.client.EXPECT().WaitForPreflightDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForPreflightDeployment(tt.ctx, tt.cluster),
tt.client.EXPECT().Delete(tt.ctx, tt.cluster, tt.manifestPre),
tt.expectTemplateManifest(),
tt.client.EXPECT().Apply(tt.ctx, tt.cluster, tt.manifest),
tt.client.EXPECT().WaitForCiliumDaemonSet(tt.ctx, tt.cluster),
tt.client.EXPECT().WaitForCiliumDeployment(tt.ctx, tt.cluster),
)
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(BeNil(), "upgrader.Upgrade() should succeed and return nil ChangeDiff")
}
func TestUpgraderRunPostControlPlaneUpgradeSetup(t *testing.T) {
tt := newUpgraderTest(t)
tt.client.EXPECT().RolloutRestartCiliumDaemonSet(tt.ctx, tt.cluster)
tt.Expect(tt.u.RunPostControlPlaneUpgradeSetup(tt.ctx, tt.cluster)).To(Succeed())
}
| 209 |
eks-anywhere | aws | Go | package cilium
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
const (
// PolicyEnforcementConfigMapKey is the key used in the "cilium-config" ConfigMap to
// store the value for the PolicyEnforcementMode.
PolicyEnforcementConfigMapKey = "enable-policy"
// PolicyEnforcementComponentName is the ConfigComponentUpdatePlan name for the
// PolicyEnforcement configuration component.
PolicyEnforcementComponentName = "PolicyEnforcementMode"
// EgressMasqueradeInterfacesMapKey is the key used in the "cilium-config" ConfigMap to
// store the value for the EgressMasqueradeInterfaces.
EgressMasqueradeInterfacesMapKey = "egress-masquerade-interfaces"
// EgressMasqueradeInterfacesComponentName is the ConfigComponentUpdatePlan name for the
// egressMasqueradeInterfaces configuration component.
EgressMasqueradeInterfacesComponentName = "EgressMasqueradeInterfaces"
)
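// For illustration only (the values below are assumptions, not taken from a real
// cluster), the portion of the "cilium-config" ConfigMap that this package
// inspects might look like:
//
//	data:
//	  enable-policy: "default"
//	  egress-masquerade-interfaces: "eth0"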
// UpgradePlan contains information about a Cilium installation upgrade.
type UpgradePlan struct {
DaemonSet VersionedComponentUpgradePlan
Operator VersionedComponentUpgradePlan
ConfigMap ConfigUpdatePlan
}
// Needed determines if an upgrade is needed.
// It returns true if any of the installation components needs an upgrade.
func (c UpgradePlan) Needed() bool {
return c.VersionUpgradeNeeded() || c.ConfigUpdateNeeded()
}
// VersionUpgradeNeeded determines if a version upgrade is needed.
// It returns true if either the DaemonSet or the Operator needs an upgrade.
func (c UpgradePlan) VersionUpgradeNeeded() bool {
return c.DaemonSet.Needed() || c.Operator.Needed()
}
// ConfigUpdateNeeded determines if an upgrade is needed on the cilium config or not.
func (c UpgradePlan) ConfigUpdateNeeded() bool {
return c.ConfigMap.Needed()
}
// Reason returns the reason why an upgrade might be needed.
// If no upgrade is needed, it returns an empty string.
// When multiple components need upgrades, it composes their reasons into one.
func (c UpgradePlan) Reason() string {
components := []interface {
reason() string
}{
c.DaemonSet,
c.Operator,
c.ConfigMap,
}
s := make([]string, 0, 3)
for _, component := range components {
if reason := component.reason(); reason != "" {
s = append(s, reason)
}
}
return strings.Join(s, " - ")
}
// VersionedComponentUpgradePlan contains upgrade information for a Cilium versioned component.
type VersionedComponentUpgradePlan struct {
UpgradeReason string
OldImage string
NewImage string
}
// Needed determines if an upgrade is needed or not.
func (c VersionedComponentUpgradePlan) Needed() bool {
return c.UpgradeReason != ""
}
// reason returns the reason for the upgrade if needed.
// If upgrade is not needed, it returns an empty string.
func (c VersionedComponentUpgradePlan) reason() string {
return c.UpgradeReason
}
// ConfigUpdatePlan contains update information for the Cilium config.
type ConfigUpdatePlan struct {
UpdateReason string
Components []ConfigComponentUpdatePlan
}
// Needed determines if an upgrade is needed or not.
func (c ConfigUpdatePlan) Needed() bool {
return c.UpdateReason != ""
}
// reason returns the reason for the upgrade if needed.
// If upgrade is not needed, it returns an empty string.
func (c ConfigUpdatePlan) reason() string {
return c.UpdateReason
}
// generateUpdateReasonFromComponents reads the update reasons for the components
// and generates a compounded update reason. This is not thread safe.
func (c *ConfigUpdatePlan) generateUpdateReasonFromComponents() {
r := make([]string, 0, len(c.Components))
for _, component := range c.Components {
if reason := component.UpdateReason; reason != "" {
r = append(r, reason)
}
}
if newReason := strings.Join(r, " - "); newReason != "" {
c.UpdateReason = newReason
}
}
// ConfigComponentUpdatePlan contains update information for a Cilium config component.
type ConfigComponentUpdatePlan struct {
Name string
UpdateReason string
OldValue, NewValue string
}
// BuildUpgradePlan generates the upgrade plan information for a Cilium installation by comparing it
// with the desired cluster Spec.
func BuildUpgradePlan(installation *Installation, clusterSpec *cluster.Spec) UpgradePlan {
return UpgradePlan{
DaemonSet: daemonSetUpgradePlan(installation.DaemonSet, clusterSpec),
Operator: operatorUpgradePlan(installation.Operator, clusterSpec),
ConfigMap: configMapUpgradePlan(installation.ConfigMap, clusterSpec),
}
}
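// Illustrative usage sketch (the installation and clusterSpec variables are assumed
// to exist; this snippet is not part of the package):
//
//	plan := BuildUpgradePlan(installation, clusterSpec)
//	if plan.Needed() {
//	    fmt.Println("cilium upgrade needed:", plan.Reason())
//	}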
func daemonSetUpgradePlan(ds *appsv1.DaemonSet, clusterSpec *cluster.Spec) VersionedComponentUpgradePlan {
dsImage := clusterSpec.VersionsBundle.Cilium.Cilium.VersionedImage()
info := VersionedComponentUpgradePlan{
NewImage: dsImage,
}
if ds == nil {
info.UpgradeReason = "DaemonSet doesn't exist"
return info
}
oldDSImage := ds.Spec.Template.Spec.Containers[0].Image
info.OldImage = oldDSImage
containers := make([]corev1.Container, 0, len(ds.Spec.Template.Spec.Containers)+len(ds.Spec.Template.Spec.InitContainers))
containers = append(containers, ds.Spec.Template.Spec.Containers...)
containers = append(containers, ds.Spec.Template.Spec.InitContainers...)
for _, c := range containers {
if c.Image != dsImage {
info.OldImage = c.Image
info.UpgradeReason = fmt.Sprintf("DaemonSet container %s doesn't match image [%s] -> [%s]", c.Name, c.Image, dsImage)
return info
}
}
return info
}
func operatorUpgradePlan(operator *appsv1.Deployment, clusterSpec *cluster.Spec) VersionedComponentUpgradePlan {
newImage := clusterSpec.VersionsBundle.Cilium.Operator.VersionedImage()
info := VersionedComponentUpgradePlan{
NewImage: newImage,
}
if operator == nil {
info.UpgradeReason = "Operator deployment doesn't exist"
return info
}
if len(operator.Spec.Template.Spec.Containers) == 0 {
info.UpgradeReason = "Operator deployment doesn't have any containers"
return info
}
oldImage := operator.Spec.Template.Spec.Containers[0].Image
info.OldImage = oldImage
if oldImage != newImage {
info.UpgradeReason = fmt.Sprintf("Operator container doesn't match the provided image [%s] -> [%s]", oldImage, newImage)
return info
}
return info
}
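// configMapUpgradePlan compares the live cilium-config ConfigMap against the values in the cluster spec
// and builds the corresponding ConfigUpdatePlan.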
func configMapUpgradePlan(configMap *corev1.ConfigMap, clusterSpec *cluster.Spec) ConfigUpdatePlan {
updatePlan := &ConfigUpdatePlan{}
var newEnforcementPolicy string
if clusterSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode == "" {
newEnforcementPolicy = "default"
} else {
newEnforcementPolicy = string(clusterSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.PolicyEnforcementMode)
}
policyEnforcementUpdate := ConfigComponentUpdatePlan{
Name: PolicyEnforcementComponentName,
NewValue: newEnforcementPolicy,
}
if configMap == nil {
updatePlan.UpdateReason = "Cilium config doesn't exist"
} else if val, ok := configMap.Data[PolicyEnforcementConfigMapKey]; ok && val != "" {
policyEnforcementUpdate.OldValue = val
if policyEnforcementUpdate.OldValue != policyEnforcementUpdate.NewValue {
policyEnforcementUpdate.UpdateReason = fmt.Sprintf("Cilium enable-policy changed: [%s] -> [%s]", policyEnforcementUpdate.OldValue, policyEnforcementUpdate.NewValue)
}
} else {
policyEnforcementUpdate.UpdateReason = "Cilium enable-policy field is not present in config"
}
updatePlan.Components = append(updatePlan.Components, policyEnforcementUpdate)
newEgressMasqueradeInterfaces := clusterSpec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.EgressMasqueradeInterfaces
egressMasqueradeUpdate := ConfigComponentUpdatePlan{
Name: EgressMasqueradeInterfacesComponentName,
NewValue: newEgressMasqueradeInterfaces,
}
if configMap == nil {
updatePlan.UpdateReason = "Cilium config doesn't exist"
} else if val, ok := configMap.Data[EgressMasqueradeInterfacesMapKey]; ok && val != "" {
egressMasqueradeUpdate.OldValue = val
if egressMasqueradeUpdate.OldValue != egressMasqueradeUpdate.NewValue {
egressMasqueradeUpdate.UpdateReason = fmt.Sprintf("Egress masquerade interfaces changed: [%s] -> [%s]", egressMasqueradeUpdate.OldValue, egressMasqueradeUpdate.NewValue)
}
} else if egressMasqueradeUpdate.NewValue != "" {
egressMasqueradeUpdate.UpdateReason = "Egress masquerade interfaces field is not present in config but is configured in cluster spec"
}
updatePlan.Components = append(updatePlan.Components, egressMasqueradeUpdate)
updatePlan.generateUpdateReasonFromComponents()
return *updatePlan
}
| 251 |
eks-anywhere | aws | Go | package cilium_test
import (
"testing"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
)
func TestBuildUpgradePlan(t *testing.T) {
tests := []struct {
name string
installation *cilium.Installation
clusterSpec *cluster.Spec
want cilium.UpgradePlan
}{
{
name: "no upgrade needed",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "daemon set not installed",
installation: &cilium.Installation{
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "DaemonSet doesn't exist",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "daemon container old version",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "DaemonSet container agent doesn't match image [cilium:v1.0.0] -> [cilium:v1.0.1]",
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.1",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "daemon init container old version",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.1", func(ds *appsv1.DaemonSet) {
ds.Spec.Template.Spec.InitContainers = []corev1.Container{
{
Name: "init",
Image: "cilium:v1.0.0",
},
}
}),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "DaemonSet container init doesn't match image [cilium:v1.0.0] -> [cilium:v1.0.1]",
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.1",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "operator is not present",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "Operator deployment doesn't exist",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "operator 0 containers",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0", func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers = nil
}),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "Operator deployment doesn't have any containers",
NewImage: "cilium-operator:v1.0.1",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "operator container old version",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "Operator container doesn't match the provided image [cilium-operator:v1.0.0] -> [cilium-operator:v1.0.1]",
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.1",
},
ConfigMap: cilium.ConfigUpdatePlan{
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "config map doesn't exist",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "Cilium config doesn't exist",
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
NewValue: "default",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "PolicyEnforcementMode has changed",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", ""),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
PolicyEnforcementMode: anywherev1.CiliumPolicyModeAlways,
},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "Cilium enable-policy changed: [default] -> [always]",
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "always",
UpdateReason: "Cilium enable-policy changed: [default] -> [always]",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "PolicyEnforcementMode not present in config",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", "", func(cm *corev1.ConfigMap) {
cm.Data = nil
}),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
PolicyEnforcementMode: anywherev1.CiliumPolicyModeAlways,
},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "Cilium enable-policy field is not present in config",
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "",
NewValue: "always",
UpdateReason: "Cilium enable-policy field is not present in config",
},
{
Name: "EgressMasqueradeInterfaces",
},
},
},
},
},
{
name: "EgressMasqueradeInterfaces has changed",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", "old"),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
EgressMasqueradeInterfaces: "new",
},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "Egress masquerade interfaces changed: [old] -> [new]",
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "default",
NewValue: "default",
},
{
Name: cilium.EgressMasqueradeInterfacesComponentName,
OldValue: "old",
NewValue: "new",
UpdateReason: "Egress masquerade interfaces changed: [old] -> [new]",
},
},
},
},
},
{
name: "EgressMasqueradeInterfaces not present in config",
installation: &cilium.Installation{
DaemonSet: daemonSet("cilium:v1.0.0"),
Operator: deployment("cilium-operator:v1.0.0"),
ConfigMap: ciliumConfigMap("default", "", func(cm *corev1.ConfigMap) {
cm.Data = nil
}),
},
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:v1.0.0"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:v1.0.0"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
EgressMasqueradeInterfaces: "new",
},
}
}),
want: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium:v1.0.0",
NewImage: "cilium:v1.0.0",
},
Operator: cilium.VersionedComponentUpgradePlan{
OldImage: "cilium-operator:v1.0.0",
NewImage: "cilium-operator:v1.0.0",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "Cilium enable-policy field is not present in config - Egress masquerade interfaces field is not present in config but is configured in cluster spec",
Components: []cilium.ConfigComponentUpdatePlan{
{
Name: cilium.PolicyEnforcementComponentName,
OldValue: "",
NewValue: "default",
UpdateReason: "Cilium enable-policy field is not present in config",
},
{
Name: cilium.EgressMasqueradeInterfacesComponentName,
OldValue: "",
NewValue: "new",
UpdateReason: "Egress masquerade interfaces field is not present in config but is configured in cluster spec",
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(
cilium.BuildUpgradePlan(tt.installation, tt.clusterSpec),
).To(Equal(tt.want))
})
}
}
type deploymentOpt func(*appsv1.Deployment)
func deployment(image string, opts ...deploymentOpt) *appsv1.Deployment {
d := &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: image,
},
},
},
},
},
}
for _, opt := range opts {
opt(d)
}
return d
}
type dsOpt func(*appsv1.DaemonSet)
func daemonSet(image string, opts ...dsOpt) *appsv1.DaemonSet {
d := &appsv1.DaemonSet{
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "agent",
Image: image,
},
},
},
},
},
}
for _, opt := range opts {
opt(d)
}
return d
}
type cmOpt func(*corev1.ConfigMap)
func ciliumConfigMap(enforcementMode string, egressMasqueradeInterface string, opts ...cmOpt) *corev1.ConfigMap {
cm := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: cilium.ConfigMapName,
Namespace: "kube-system",
},
Data: map[string]string{
cilium.PolicyEnforcementConfigMapKey: enforcementMode,
cilium.EgressMasqueradeInterfacesMapKey: egressMasqueradeInterface,
},
}
for _, o := range opts {
o(cm)
}
return cm
}
func TestConfigUpdatePlanNeeded(t *testing.T) {
tests := []struct {
name string
info cilium.ConfigUpdatePlan
want bool
}{
{
name: "not needed",
info: cilium.ConfigUpdatePlan{
UpdateReason: "",
},
want: false,
},
{
name: "needed",
info: cilium.ConfigUpdatePlan{
UpdateReason: "missing ds",
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.info.Needed()).To(Equal(tt.want))
})
}
}
func TestVersionedComponentUpgradePlanNeeded(t *testing.T) {
tests := []struct {
name string
info cilium.VersionedComponentUpgradePlan
want bool
}{
{
name: "not needed",
info: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "",
},
want: false,
},
{
name: "needed",
info: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "missing ds",
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.info.Needed()).To(Equal(tt.want))
})
}
}
func TestUpgradePlanNeeded(t *testing.T) {
tests := []struct {
name string
info cilium.UpgradePlan
want bool
}{
{
name: "not needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: false,
},
{
name: "ds needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: true,
},
{
name: "operator needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "operator old version",
},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: true,
},
{
name: "config needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "config has changed",
},
},
want: true,
},
{
name: "all needed needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "operator old version",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "config has changed",
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.info.Needed()).To(Equal(tt.want))
})
}
}
func TestUpgradePlanVersionUpgradeNeeded(t *testing.T) {
tests := []struct {
name string
info cilium.UpgradePlan
want bool
}{
{
name: "not needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "config has changed",
},
},
want: false,
},
{
name: "ds needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: true,
},
{
name: "operator needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "operator old version",
},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: true,
},
{
name: "both needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "operator old version",
},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.info.VersionUpgradeNeeded()).To(Equal(tt.want))
})
}
}
func TestUpgradePlanConfigUpdateNeeded(t *testing.T) {
tests := []struct {
name string
info cilium.UpgradePlan
want bool
}{
{
name: "not needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{},
},
want: false,
},
{
name: "config needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "config has changed",
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.info.ConfigUpdateNeeded()).To(Equal(tt.want))
})
}
}
func TestUpgradePlanReason(t *testing.T) {
tests := []struct {
name string
info cilium.UpgradePlan
want string
}{
{
name: "not needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{},
},
want: "",
},
{
name: "ds needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{},
},
want: "ds old version",
},
{
name: "operator needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "operator old version",
},
},
want: "operator old version",
},
{
name: "all needed",
info: cilium.UpgradePlan{
DaemonSet: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "ds old version",
},
Operator: cilium.VersionedComponentUpgradePlan{
UpgradeReason: "operator old version",
},
ConfigMap: cilium.ConfigUpdatePlan{
UpdateReason: "config has changed",
},
},
want: "ds old version - operator old version - config has changed",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.info.Reason()).To(Equal(tt.want))
})
}
}
| 872 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/cilium/client.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/api/apps/v1"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// ApplyKubeSpecFromBytes mocks base method.
func (m *MockClient) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", ctx, cluster, data)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockClientMockRecorder) ApplyKubeSpecFromBytes(ctx, cluster, data interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockClient)(nil).ApplyKubeSpecFromBytes), ctx, cluster, data)
}
// DeleteKubeSpecFromBytes mocks base method.
func (m *MockClient) DeleteKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteKubeSpecFromBytes", ctx, cluster, data)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteKubeSpecFromBytes indicates an expected call of DeleteKubeSpecFromBytes.
func (mr *MockClientMockRecorder) DeleteKubeSpecFromBytes(ctx, cluster, data interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteKubeSpecFromBytes", reflect.TypeOf((*MockClient)(nil).DeleteKubeSpecFromBytes), ctx, cluster, data)
}
// GetDaemonSet mocks base method.
func (m *MockClient) GetDaemonSet(ctx context.Context, name, namespace, kubeconfig string) (*v1.DaemonSet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetDaemonSet", ctx, name, namespace, kubeconfig)
ret0, _ := ret[0].(*v1.DaemonSet)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDaemonSet indicates an expected call of GetDaemonSet.
func (mr *MockClientMockRecorder) GetDaemonSet(ctx, name, namespace, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDaemonSet", reflect.TypeOf((*MockClient)(nil).GetDaemonSet), ctx, name, namespace, kubeconfig)
}
// GetDeployment mocks base method.
func (m *MockClient) GetDeployment(ctx context.Context, name, namespace, kubeconfig string) (*v1.Deployment, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetDeployment", ctx, name, namespace, kubeconfig)
ret0, _ := ret[0].(*v1.Deployment)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDeployment indicates an expected call of GetDeployment.
func (mr *MockClientMockRecorder) GetDeployment(ctx, name, namespace, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeployment", reflect.TypeOf((*MockClient)(nil).GetDeployment), ctx, name, namespace, kubeconfig)
}
// RolloutRestartDaemonSet mocks base method.
func (m *MockClient) RolloutRestartDaemonSet(ctx context.Context, name, namespace, kubeconfig string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RolloutRestartDaemonSet", ctx, name, namespace, kubeconfig)
ret0, _ := ret[0].(error)
return ret0
}
// RolloutRestartDaemonSet indicates an expected call of RolloutRestartDaemonSet.
func (mr *MockClientMockRecorder) RolloutRestartDaemonSet(ctx, name, namespace, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RolloutRestartDaemonSet", reflect.TypeOf((*MockClient)(nil).RolloutRestartDaemonSet), ctx, name, namespace, kubeconfig)
}
| 110 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/cilium/templater.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockHelm is a mock of Helm interface.
type MockHelm struct {
ctrl *gomock.Controller
recorder *MockHelmMockRecorder
}
// MockHelmMockRecorder is the mock recorder for MockHelm.
type MockHelmMockRecorder struct {
mock *MockHelm
}
// NewMockHelm creates a new mock instance.
func NewMockHelm(ctrl *gomock.Controller) *MockHelm {
mock := &MockHelm{ctrl: ctrl}
mock.recorder = &MockHelmMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHelm) EXPECT() *MockHelmMockRecorder {
return m.recorder
}
// RegistryLogin mocks base method.
func (m *MockHelm) RegistryLogin(ctx context.Context, registry, username, password string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegistryLogin", ctx, registry, username, password)
ret0, _ := ret[0].(error)
return ret0
}
// RegistryLogin indicates an expected call of RegistryLogin.
func (mr *MockHelmMockRecorder) RegistryLogin(ctx, registry, username, password interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryLogin", reflect.TypeOf((*MockHelm)(nil).RegistryLogin), ctx, registry, username, password)
}
// Template mocks base method.
func (m *MockHelm) Template(ctx context.Context, ociURI, version, namespace string, values interface{}, kubeVersion string) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Template", ctx, ociURI, version, namespace, values, kubeVersion)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Template indicates an expected call of Template.
func (mr *MockHelmMockRecorder) Template(ctx, ociURI, version, namespace, values, kubeVersion interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Template", reflect.TypeOf((*MockHelm)(nil).Template), ctx, ociURI, version, namespace, values, kubeVersion)
}
| 65 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/cilium/installer.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
cilium "github.com/aws/eks-anywhere/pkg/networking/cilium"
gomock "github.com/golang/mock/gomock"
)
// MockInstallTemplater is a mock of InstallTemplater interface.
type MockInstallTemplater struct {
ctrl *gomock.Controller
recorder *MockInstallTemplaterMockRecorder
}
// MockInstallTemplaterMockRecorder is the mock recorder for MockInstallTemplater.
type MockInstallTemplaterMockRecorder struct {
mock *MockInstallTemplater
}
// NewMockInstallTemplater creates a new mock instance.
func NewMockInstallTemplater(ctrl *gomock.Controller) *MockInstallTemplater {
mock := &MockInstallTemplater{ctrl: ctrl}
mock.recorder = &MockInstallTemplaterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInstallTemplater) EXPECT() *MockInstallTemplaterMockRecorder {
return m.recorder
}
// GenerateManifest mocks base method.
func (m *MockInstallTemplater) GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...cilium.ManifestOpt) ([]byte, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, spec}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GenerateManifest", varargs...)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GenerateManifest indicates an expected call of GenerateManifest.
func (mr *MockInstallTemplaterMockRecorder) GenerateManifest(ctx, spec interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, spec}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateManifest", reflect.TypeOf((*MockInstallTemplater)(nil).GenerateManifest), varargs...)
}
| 58 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/cilium/upgrader.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
cilium "github.com/aws/eks-anywhere/pkg/networking/cilium"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
)
// MockKubernetesClient is a mock of KubernetesClient interface.
type MockKubernetesClient struct {
ctrl *gomock.Controller
recorder *MockKubernetesClientMockRecorder
}
// MockKubernetesClientMockRecorder is the mock recorder for MockKubernetesClient.
type MockKubernetesClientMockRecorder struct {
mock *MockKubernetesClient
}
// NewMockKubernetesClient creates a new mock instance.
func NewMockKubernetesClient(ctrl *gomock.Controller) *MockKubernetesClient {
mock := &MockKubernetesClient{ctrl: ctrl}
mock.recorder = &MockKubernetesClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubernetesClient) EXPECT() *MockKubernetesClientMockRecorder {
return m.recorder
}
// Apply mocks base method.
func (m *MockKubernetesClient) Apply(ctx context.Context, cluster *types.Cluster, data []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Apply", ctx, cluster, data)
ret0, _ := ret[0].(error)
return ret0
}
// Apply indicates an expected call of Apply.
func (mr *MockKubernetesClientMockRecorder) Apply(ctx, cluster, data interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockKubernetesClient)(nil).Apply), ctx, cluster, data)
}
// Delete mocks base method.
func (m *MockKubernetesClient) Delete(ctx context.Context, cluster *types.Cluster, data []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, cluster, data)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockKubernetesClientMockRecorder) Delete(ctx, cluster, data interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubernetesClient)(nil).Delete), ctx, cluster, data)
}
// RolloutRestartCiliumDaemonSet mocks base method.
func (m *MockKubernetesClient) RolloutRestartCiliumDaemonSet(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RolloutRestartCiliumDaemonSet", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// RolloutRestartCiliumDaemonSet indicates an expected call of RolloutRestartCiliumDaemonSet.
func (mr *MockKubernetesClientMockRecorder) RolloutRestartCiliumDaemonSet(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RolloutRestartCiliumDaemonSet", reflect.TypeOf((*MockKubernetesClient)(nil).RolloutRestartCiliumDaemonSet), ctx, cluster)
}
// WaitForCiliumDaemonSet mocks base method.
func (m *MockKubernetesClient) WaitForCiliumDaemonSet(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForCiliumDaemonSet", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForCiliumDaemonSet indicates an expected call of WaitForCiliumDaemonSet.
func (mr *MockKubernetesClientMockRecorder) WaitForCiliumDaemonSet(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCiliumDaemonSet", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForCiliumDaemonSet), ctx, cluster)
}
// WaitForCiliumDeployment mocks base method.
func (m *MockKubernetesClient) WaitForCiliumDeployment(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForCiliumDeployment", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForCiliumDeployment indicates an expected call of WaitForCiliumDeployment.
func (mr *MockKubernetesClientMockRecorder) WaitForCiliumDeployment(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCiliumDeployment", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForCiliumDeployment), ctx, cluster)
}
// WaitForPreflightDaemonSet mocks base method.
func (m *MockKubernetesClient) WaitForPreflightDaemonSet(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForPreflightDaemonSet", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForPreflightDaemonSet indicates an expected call of WaitForPreflightDaemonSet.
func (mr *MockKubernetesClientMockRecorder) WaitForPreflightDaemonSet(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPreflightDaemonSet", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForPreflightDaemonSet), ctx, cluster)
}
// WaitForPreflightDeployment mocks base method.
func (m *MockKubernetesClient) WaitForPreflightDeployment(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForPreflightDeployment", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForPreflightDeployment indicates an expected call of WaitForPreflightDeployment.
func (mr *MockKubernetesClientMockRecorder) WaitForPreflightDeployment(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPreflightDeployment", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForPreflightDeployment), ctx, cluster)
}
// MockUpgradeTemplater is a mock of UpgradeTemplater interface.
type MockUpgradeTemplater struct {
ctrl *gomock.Controller
recorder *MockUpgradeTemplaterMockRecorder
}
// MockUpgradeTemplaterMockRecorder is the mock recorder for MockUpgradeTemplater.
type MockUpgradeTemplaterMockRecorder struct {
mock *MockUpgradeTemplater
}
// NewMockUpgradeTemplater creates a new mock instance.
func NewMockUpgradeTemplater(ctrl *gomock.Controller) *MockUpgradeTemplater {
mock := &MockUpgradeTemplater{ctrl: ctrl}
mock.recorder = &MockUpgradeTemplaterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUpgradeTemplater) EXPECT() *MockUpgradeTemplaterMockRecorder {
return m.recorder
}
// GenerateManifest mocks base method.
func (m *MockUpgradeTemplater) GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...cilium.ManifestOpt) ([]byte, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, spec}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GenerateManifest", varargs...)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GenerateManifest indicates an expected call of GenerateManifest.
func (mr *MockUpgradeTemplaterMockRecorder) GenerateManifest(ctx, spec interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, spec}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateManifest", reflect.TypeOf((*MockUpgradeTemplater)(nil).GenerateManifest), varargs...)
}
// GenerateUpgradePreflightManifest mocks base method.
func (m *MockUpgradeTemplater) GenerateUpgradePreflightManifest(ctx context.Context, spec *cluster.Spec) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GenerateUpgradePreflightManifest", ctx, spec)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GenerateUpgradePreflightManifest indicates an expected call of GenerateUpgradePreflightManifest.
func (mr *MockUpgradeTemplaterMockRecorder) GenerateUpgradePreflightManifest(ctx, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateUpgradePreflightManifest", reflect.TypeOf((*MockUpgradeTemplater)(nil).GenerateUpgradePreflightManifest), ctx, spec)
}
| 195 |
eks-anywhere | aws | Go | package reconciler
import (
"context"
appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
)
type preflightInstallation struct {
daemonSet *appsv1.DaemonSet
deployment *appsv1.Deployment
}
func (p *preflightInstallation) installed() bool {
return p.daemonSet != nil && p.deployment != nil
}
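// getPreflightInstallation retrieves the Cilium preflight DaemonSet and Deployment from the cluster,
// returning nil for any object that is not found.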
func getPreflightInstallation(ctx context.Context, client client.Client) (*preflightInstallation, error) {
ds, err := getPreflightDaemonSet(ctx, client)
if err != nil {
return nil, err
}
deployment, err := getPreflightDeployment(ctx, client)
if err != nil {
return nil, err
}
return &preflightInstallation{
daemonSet: ds,
deployment: deployment,
}, nil
}
func getPreflightDeployment(ctx context.Context, client client.Client) (*appsv1.Deployment, error) {
deployment := &appsv1.Deployment{}
key := types.NamespacedName{
Name: cilium.PreflightDeploymentName,
Namespace: constants.KubeSystemNamespace,
}
err := client.Get(ctx, key, deployment)
switch {
case apierrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, err
}
return deployment, nil
}
func getPreflightDaemonSet(ctx context.Context, client client.Client) (*appsv1.DaemonSet, error) {
ds := &appsv1.DaemonSet{}
key := types.NamespacedName{Name: cilium.PreflightDaemonSetName, Namespace: constants.KubeSystemNamespace}
err := client.Get(ctx, key, ds)
switch {
case apierrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, err
}
return ds, nil
}
| 71 |
eks-anywhere | aws | Go | package reconciler_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 15 |
eks-anywhere | aws | Go | package reconciler
import (
"context"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
// EKSACiliumInstalledAnnotation indicates a cluster has previously been observed to have
// EKS-A Cilium installed, irrespective of whether it is still installed.
const EKSACiliumInstalledAnnotation = "anywhere.eks.amazonaws.com/eksa-cilium"
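// For illustration, a Cluster object carrying this marker might look like the
// following (the cluster name is a made-up example; the value is empty, as set
// by markCiliumInstalled below):
//
//	metadata:
//	  name: my-cluster
//	  annotations:
//	    anywhere.eks.amazonaws.com/eksa-cilium: ""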
// ciliumWasInstalled checks the cluster for the EKSACiliumInstalledAnnotation.
func ciliumWasInstalled(ctx context.Context, cluster *v1alpha1.Cluster) bool {
if cluster.Annotations == nil {
return false
}
_, ok := cluster.Annotations[EKSACiliumInstalledAnnotation]
return ok
}
// markCiliumInstalled populates the EKSACiliumInstalledAnnotation on the cluster. It may trigger
// another reconciliation event.
func markCiliumInstalled(ctx context.Context, cluster *v1alpha1.Cluster) {
clientutil.AddAnnotation(cluster, EKSACiliumInstalledAnnotation, "")
}
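// Illustrative sketch of how the reconciler uses these helpers (it mirrors the
// logic in Reconcile; variable names are assumptions):
//
//	if !ciliumWasInstalled(ctx, spec.Cluster) && installation.Installed() {
//	    markCiliumInstalled(ctx, spec.Cluster)
//	}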
| 28 |
eks-anywhere | aws | Go | package reconciler
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/controller/serverside"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/utils/oci"
)
const (
defaultRequeueTime = time.Second * 10
)
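// Templater generates the Cilium manifests used for installation, upgrades, and upgrade preflight checks.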
type Templater interface {
GenerateUpgradePreflightManifest(ctx context.Context, spec *cluster.Spec) ([]byte, error)
GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...cilium.ManifestOpt) ([]byte, error)
}
// Reconciler reconciles a Cilium CNI installation.
type Reconciler struct {
templater Templater
}
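// New builds a Cilium Reconciler that uses the provided Templater.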
func New(templater Templater) *Reconciler {
return &Reconciler{
templater: templater,
}
}
// Reconcile takes the Cilium CNI in a cluster to the desired state defined in a cluster Spec.
// It uses a controller.Result to indicate when requeues are needed. client is connected to the
// target Kubernetes cluster, not the management cluster.
// nolint:gocyclo
// TODO: reduce cyclomatic complexity - https://github.com/aws/eks-anywhere-internal/issues/1461
func (r *Reconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (res controller.Result, reterr error) {
installation, err := cilium.GetInstallation(ctx, client)
if err != nil {
return controller.Result{}, err
}
// We use a marker to detect if EKS-A Cilium has ever been installed. If it has never been
// installed and isn't currently installed we always attempt to install it regardless of whether
// the user is skipping EKS-A Cilium management. This satisfies the criteria for successful cluster
// creation.
//
// If EKS-A Cilium was previously installed, as denoted by the marker, we only want to
// manage it if it's still installed and the user still wants us to manage the installation (as
// denoted by the API skip flag).
//
// In the event a user uninstalls EKS-A Cilium, updates the cluster spec to skip EKS-A Cilium
// management, then tries to upgrade, we will attempt to install EKS-A Cilium. This is because
// reconciliation has no operational context (create vs upgrade) and can only observe that no
// installation is present and there is no marker indicating it was ever present which is
// equivalent to a typical create scenario where we must install a CNI to satisfy cluster
// create success criteria.
// To accommodate upgrades of clusters created prior to introducing markers, we check for
// an existing installation and try to mark the cluster as having already had EKS-A
// Cilium installed.
if !ciliumWasInstalled(ctx, spec.Cluster) && installation.Installed() {
logger.Info(fmt.Sprintf(
"Cilium installed but missing %v annotation; applying annotation",
EKSACiliumInstalledAnnotation,
))
markCiliumInstalled(ctx, spec.Cluster)
}
ciliumCfg := spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium
if !installation.Installed() &&
(ciliumCfg.IsManaged() || !ciliumWasInstalled(ctx, spec.Cluster)) {
if err := r.install(ctx, logger, client, spec); err != nil {
return controller.Result{}, err
}
logger.Info(fmt.Sprintf(
"Applying %v annotation to Cluster object",
EKSACiliumInstalledAnnotation,
))
markCiliumInstalled(ctx, spec.Cluster)
conditions.MarkTrue(spec.Cluster, anywherev1.DefaultCNIConfiguredCondition)
return controller.Result{}, nil
}
if !ciliumCfg.IsManaged() {
logger.Info("Cilium configured as unmanaged, skipping upgrade")
conditions.MarkFalse(spec.Cluster, anywherev1.DefaultCNIConfiguredCondition, anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, clusterv1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades")
return controller.Result{}, nil
}
logger.Info("Cilium is already installed, checking if it needs upgrade")
upgradeInfo := cilium.BuildUpgradePlan(installation, spec)
if upgradeInfo.VersionUpgradeNeeded() {
logger.Info("Cilium upgrade needed", "reason", upgradeInfo.Reason())
if result, err := r.upgrade(ctx, logger, client, installation, spec); err != nil {
return controller.Result{}, err
} else if result.Return() {
conditions.MarkFalse(spec.Cluster, anywherev1.DefaultCNIConfiguredCondition, anywherev1.DefaultCNIUpgradeInProgressReason, clusterv1.ConditionSeverityInfo, "Cilium version upgrade needed")
return result, nil
}
} else if upgradeInfo.ConfigUpdateNeeded() {
logger.Info("Cilium config update needed", "reason", upgradeInfo.Reason())
if err := r.updateConfig(ctx, client, spec); err != nil {
return controller.Result{}, err
}
} else {
logger.Info("Cilium is already up to date")
}
// Upgrade process has run its course, and so we can now mark that the default cni has been configured.
conditions.MarkTrue(spec.Cluster, anywherev1.DefaultCNIConfiguredCondition)
return r.deletePreflightIfExists(ctx, client, spec)
}
func (r *Reconciler) install(ctx context.Context, log logr.Logger, client client.Client, spec *cluster.Spec) error {
log.Info("Installing Cilium")
if err := r.applyFullManifest(ctx, client, spec); err != nil {
return errors.Wrap(err, "installing Cilium")
}
return nil
}
func (r *Reconciler) upgrade(ctx context.Context, logger logr.Logger, client client.Client, installation *cilium.Installation, spec *cluster.Spec) (controller.Result, error) {
if err := cilium.CheckDaemonSetReady(installation.DaemonSet); err != nil {
logger.Info("Cilium DS is not ready, requeueing", "reason", err.Error())
return controller.Result{Result: &ctrl.Result{
RequeueAfter: defaultRequeueTime,
}}, nil
}
preflightInstallation, err := getPreflightInstallation(ctx, client)
if err != nil {
return controller.Result{}, err
}
if !preflightInstallation.installed() {
logger.Info("Installing Cilium upgrade preflight manifest")
if err = r.installPreflight(ctx, client, spec); err != nil {
return controller.Result{}, err
}
preflightInstallation, err = getPreflightInstallation(ctx, client)
if err != nil {
return controller.Result{}, err
}
if !preflightInstallation.installed() {
logger.Info("Cilium preflight is not available yet, requeueing")
return controller.Result{Result: &ctrl.Result{
RequeueAfter: defaultRequeueTime,
}}, nil
}
}
if err = cilium.CheckPreflightDaemonSetReady(installation.DaemonSet, preflightInstallation.daemonSet); err != nil {
logger.Info("Cilium preflight daemon set is not ready, requeueing", "reason", err.Error())
return controller.Result{Result: &ctrl.Result{
RequeueAfter: defaultRequeueTime,
}}, nil
}
if err = cilium.CheckDeploymentReady(preflightInstallation.deployment); err != nil {
logger.Info("Cilium preflight deployment is not ready, requeueing", "reason", err.Error())
return controller.Result{Result: &ctrl.Result{
RequeueAfter: defaultRequeueTime,
}}, nil
}
logger.Info("Generating Cilium upgrade manifest")
dsImage := installation.DaemonSet.Spec.Template.Spec.Containers[0].Image
_, dsImageTag := oci.Split(dsImage)
previousCiliumVersion, err := semver.New(dsImageTag)
if err != nil {
return controller.Result{}, errors.Wrapf(err, "installed cilium DS has an invalid version tag: %s", dsImage)
}
upgradeManifest, err := r.templater.GenerateManifest(ctx, spec,
cilium.WithUpgradeFromVersion(*previousCiliumVersion),
)
if err != nil {
return controller.Result{}, err
}
logger.Info("Applying Cilium upgrade manifest")
if err := serverside.ReconcileYaml(ctx, client, upgradeManifest); err != nil {
return controller.Result{}, err
}
return controller.Result{}, nil
}
func (r *Reconciler) updateConfig(ctx context.Context, client client.Client, spec *cluster.Spec) error {
if err := r.applyFullManifest(ctx, client, spec); err != nil {
return errors.Wrap(err, "updating cilium config")
}
return nil
}
func (r *Reconciler) applyFullManifest(ctx context.Context, client client.Client, spec *cluster.Spec) error {
upgradeManifest, err := r.templater.GenerateManifest(ctx, spec)
if err != nil {
return err
}
return serverside.ReconcileYaml(ctx, client, upgradeManifest)
}
func (r *Reconciler) deletePreflightIfExists(ctx context.Context, client client.Client, spec *cluster.Spec) (controller.Result, error) {
preFlightCiliumDS, err := getPreflightDaemonSet(ctx, client)
if err != nil {
return controller.Result{}, err
}
if preFlightCiliumDS != nil {
preflight, err := r.templater.GenerateUpgradePreflightManifest(ctx, spec)
if err != nil {
return controller.Result{}, err
}
logger.Info("Deleting Preflight Cilium objects")
if err := clientutil.DeleteYaml(ctx, client, preflight); err != nil {
return controller.Result{}, err
}
}
return controller.Result{}, nil
}
func (r *Reconciler) installPreflight(ctx context.Context, client client.Client, spec *cluster.Spec) error {
preflight, err := r.templater.GenerateUpgradePreflightManifest(ctx, spec)
if err != nil {
return err
}
if err = serverside.ReconcileYaml(ctx, client, preflight); err != nil {
return err
}
return nil
}
| 263 |
eks-anywhere | aws | Go | package reconciler_test
import (
"context"
"errors"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/networking/cilium/reconciler"
"github.com/aws/eks-anywhere/pkg/networking/cilium/reconciler/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestReconcilerReconcileInstall(t *testing.T) {
tt := newReconcileTest(t)
ds := ciliumDaemonSet()
operator := ciliumOperator()
manifest := buildManifest(tt.WithT, ds, operator)
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec).Return(manifest, nil)
tt.Expect(
tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec),
).To(Equal(controller.Result{}))
tt.expectDaemonSetSemanticallyEqual(ds)
tt.expectOperatorSemanticallyEqual(operator)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("True", "", "", ""))
}
func TestReconcilerReconcileInstallErrorGeneratingManifest(t *testing.T) {
tt := newReconcileTest(t)
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec).Return(nil, errors.New("generating manifest"))
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("generating manifest")))
}
func TestReconcilerReconcileErrorYamlReconcile(t *testing.T) {
tt := newReconcileTest(t)
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec).Return([]byte("invalid yaml"), nil)
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("error unmarshaling JSON")))
}
func TestReconcilerReconcileAlreadyUpToDate(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
tt := newReconcileTest(t).withObjects(ds, operator, cm)
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDaemonSetSemanticallyEqual(ds)
tt.expectOperatorSemanticallyEqual(operator)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("True", "", "", ""))
}
func TestReconcilerReconcileAlreadyInDesiredVersionWithPreflight(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
preflightDS := ciliumPreflightDaemonSet()
cm := ciliumConfigMap()
preflightDeployment := ciliumPreflightDeployment()
tt := newReconcileTest(t)
// for deleting the preflight
preflightManifest := tt.buildManifest(preflightDS, preflightDeployment)
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return(preflightManifest, nil)
tt.withObjects(ds, operator, preflightDS, preflightDeployment, cm)
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDaemonSetSemanticallyEqual(ds)
tt.expectOperatorSemanticallyEqual(operator)
tt.expectDSToNotExist(preflightDS.Name, preflightDS.Namespace)
tt.expectDeploymentToNotExist(preflightDeployment.Name, preflightDeployment.Namespace)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("True", "", "", ""))
}
func TestReconcilerReconcileAlreadyInDesiredVersionWithPreflightErrorFromTemplater(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
preflightDS := ciliumPreflightDaemonSet()
preflightDeployment := ciliumPreflightDeployment()
tt := newReconcileTest(t)
// for deleting the preflight
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return(nil, errors.New("generating preflight"))
tt.withObjects(ds, operator, cm, preflightDS, preflightDeployment)
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("generating preflight")))
}
func TestReconcilerReconcileAlreadyInDesiredVersionWithPreflightErrorDeletingYaml(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
preflightDS := ciliumPreflightDaemonSet()
preflightDeployment := ciliumPreflightDeployment()
tt := newReconcileTest(t)
// for deleting the preflight
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return([]byte("invalid yaml"), nil)
tt.withObjects(ds, operator, cm, preflightDS, preflightDeployment)
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("error unmarshaling JSON")))
}
func TestReconcilerReconcileUpgradeButCiliumDaemonSetNotReady(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
tt := newReconcileTest(t).withObjects(ds, operator)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.ResultWithRequeue(10 * time.Second)),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.DefaultCNIUpgradeInProgressReason, v1beta1.ConditionSeverityInfo, "Cilium version upgrade needed"))
}
func TestReconcilerReconcileUpgradeNeedsPreflightAndPreflightDaemonSetNotAvailable(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
tt := newReconcileTest(t).withObjects(ds, operator)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec)
tt.makeCiliumDaemonSetReady()
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.ResultWithRequeue(10 * time.Second)),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.DefaultCNIUpgradeInProgressReason, v1beta1.ConditionSeverityInfo, "Cilium version upgrade needed"))
}
func TestReconcilerReconcileUpgradeErrorGeneratingPreflight(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
tt := newReconcileTest(t).withObjects(ds, operator)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return(nil, errors.New("generating preflight"))
tt.makeCiliumDaemonSetReady()
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("generating preflight")))
}
func TestReconcilerReconcileUpgradeNeedsPreflightAndPreflightDeploymentNotAvailable(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
tt := newReconcileTest(t).withObjects(ds, operator)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
preflightManifest := tt.buildManifest(ciliumPreflightDaemonSet())
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return(preflightManifest, nil)
tt.makeCiliumDaemonSetReady()
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.ResultWithRequeue(10 * time.Second)),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.DefaultCNIUpgradeInProgressReason, v1beta1.ConditionSeverityInfo, "Cilium version upgrade needed"))
}
func TestReconcilerReconcileUpgradeNeedsPreflightAndPreflightNotReady(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
tt := newReconcileTest(t).withObjects(ds, operator)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
preflightManifest := tt.buildManifest(ciliumPreflightDaemonSet(), ciliumPreflightDeployment())
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return(preflightManifest, nil)
tt.makeCiliumDaemonSetReady()
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.ResultWithRequeue(10 * time.Second)),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.DefaultCNIUpgradeInProgressReason, v1beta1.ConditionSeverityInfo, "Cilium version upgrade needed"))
}
func TestReconcilerReconcileUpgradePreflightDaemonSetNotReady(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
tt := newReconcileTest(t).withObjects(ds, operator, ciliumPreflightDaemonSet(), ciliumPreflightDeployment())
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.makeCiliumDaemonSetReady()
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.ResultWithRequeue(10 * time.Second)),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.DefaultCNIUpgradeInProgressReason, v1beta1.ConditionSeverityInfo, "Cilium version upgrade needed"))
}
func TestReconcilerReconcileUpgradePreflightDeploymentSetNotReady(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
preflight := ciliumPreflightDaemonSet()
tt := newReconcileTest(t).withObjects(ds, operator, preflight, ciliumPreflightDeployment())
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.11.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.11.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.makeCiliumDaemonSetReady()
tt.makePreflightDaemonSetReady()
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.ResultWithRequeue(10 * time.Second)),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.DefaultCNIUpgradeInProgressReason, v1beta1.ConditionSeverityInfo, "Cilium version upgrade needed"))
}
func TestReconcilerReconcileUpgradeInvalidCiliumInstalledVersion(t *testing.T) {
ds := ciliumDaemonSet()
ds.Spec.Template.Spec.Containers[0].Image = "cilium:eksa-invalid-version"
operator := ciliumOperator()
preflight := ciliumPreflightDaemonSet()
newDSImage := "cilium:1.11.1-eksa-1"
newOperatorImage := "cilium-operator:1.11.1-eksa-1"
tt := newReconcileTest(t).withObjects(ds, operator, preflight, ciliumPreflightDeployment())
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = newDSImage
s.VersionsBundle.Cilium.Operator.URI = newOperatorImage
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.makeCiliumDaemonSetReady()
tt.makePreflightDaemonSetReady()
tt.makePreflightDeploymentReady()
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("installed cilium DS has an invalid version tag")))
}
func TestReconcilerReconcileUpgradeErrorGeneratingManifest(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
preflight := ciliumPreflightDaemonSet()
newDSImage := "cilium:1.11.1-eksa-1"
newOperatorImage := "cilium-operator:1.11.1-eksa-1"
tt := newReconcileTest(t).withObjects(ds, operator, preflight, ciliumPreflightDeployment())
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = newDSImage
s.VersionsBundle.Cilium.Operator.URI = newOperatorImage
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec, gomock.Not(gomock.Nil())).Return(nil, errors.New("generating manifest"))
tt.makeCiliumDaemonSetReady()
tt.makePreflightDaemonSetReady()
tt.makePreflightDeploymentReady()
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("generating manifest")))
tt.expectCiliumInstalledAnnotation()
}
func TestReconcilerReconcileUpgradePreflightErrorYamlReconcile(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
preflight := ciliumPreflightDaemonSet()
newDSImage := "cilium:1.11.1-eksa-1"
newOperatorImage := "cilium-operator:1.11.1-eksa-1"
tt := newReconcileTest(t).withObjects(ds, operator, preflight, ciliumPreflightDeployment())
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = newDSImage
s.VersionsBundle.Cilium.Operator.URI = newOperatorImage
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec, gomock.Not(gomock.Nil())).Return([]byte("invalid yaml"), nil)
tt.makeCiliumDaemonSetReady()
tt.makePreflightDaemonSetReady()
tt.makePreflightDeploymentReady()
result, err := tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(err).To(MatchError(ContainSubstring("error unmarshaling JSON")))
tt.expectCiliumInstalledAnnotation()
}
func TestReconcilerReconcileUpgradePreflightReady(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
preflight := ciliumPreflightDaemonSet()
newDSImage := "cilium:1.11.1-eksa-1"
newOperatorImage := "cilium-operator:1.11.1-eksa-1"
wantDS := ds.DeepCopy()
wantDS.Spec.Template.Spec.Containers[0].Image = newDSImage
wantOperator := operator.DeepCopy()
wantOperator.Spec.Template.Spec.Containers[0].Image = newOperatorImage
tt := newReconcileTest(t).withObjects(ds, operator, preflight, ciliumPreflightDeployment())
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = newDSImage
s.VersionsBundle.Cilium.Operator.URI = newOperatorImage
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
}
})
upgradeManifest := tt.buildManifest(wantDS, wantOperator)
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec, gomock.Not(gomock.Nil())).Return(upgradeManifest, nil)
// for deleting the preflight
preflightManifest := tt.buildManifest(ciliumPreflightDaemonSet(), ciliumPreflightDeployment())
tt.templater.EXPECT().GenerateUpgradePreflightManifest(tt.ctx, tt.spec).Return(preflightManifest, nil)
tt.makeCiliumDaemonSetReady()
tt.makePreflightDaemonSetReady()
tt.makePreflightDeploymentReady()
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("True", "", "", ""))
}
func TestReconcilerReconcileUpdateConfigConfigMapEnablePolicyChange(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
tt := newReconcileTest(t).withObjects(ds, operator, cm)
newDSImage := "cilium:1.10.1-eksa-1"
newOperatorImage := "cilium-operator:1.10.1-eksa-1"
upgradeManifest := tt.buildManifest(ds, operator, cm)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = newDSImage
s.VersionsBundle.Cilium.Operator.URI = newOperatorImage
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
PolicyEnforcementMode: "always",
},
}
})
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec, gomock.Not(gomock.Nil())).Return(upgradeManifest, nil)
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("True", "", "", ""))
}
func TestReconcilerReconcileSkipUpgradeWithoutCiliumInstalled(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
tt := newReconcileTest(t)
upgradeManifest := tt.buildManifest(ds, operator, cm)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
}
})
tt.templater.EXPECT().GenerateManifest(tt.ctx, tt.spec).Return(upgradeManifest, nil)
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDaemonSetSemanticallyEqual(ds)
tt.expectOperatorSemanticallyEqual(operator)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("True", "", "", ""))
}
func TestReconcilerReconcileSkipUpgradeWithCiliumInstalled(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
tt := newReconcileTest(t).withObjects(ds, operator, cm)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
}
})
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDaemonSetSemanticallyEqual(ds)
tt.expectOperatorSemanticallyEqual(operator)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, v1beta1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades"))
}
func TestReconcilerReconcileSkipUpgradeWithAnnotationWithoutCilium(t *testing.T) {
tt := newReconcileTest(t)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
}
s.Cluster.Annotations = map[string]string{
reconciler.EKSACiliumInstalledAnnotation: "true",
}
})
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, v1beta1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades"))
}
func TestReconcilerReconcileSkipUpgradeWithAnnotationWithCilium(t *testing.T) {
ds := ciliumDaemonSet()
operator := ciliumOperator()
cm := ciliumConfigMap()
tt := newReconcileTest(t).withObjects(ds, operator, cm)
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
}
s.Cluster.Name = rand.String(10)
s.Cluster.Namespace = "default"
s.Cluster.Annotations = map[string]string{
reconciler.EKSACiliumInstalledAnnotation: "true",
}
})
if err := tt.client.Create(context.Background(), tt.spec.Cluster); err != nil {
t.Fatal(err)
}
tt.Expect(tt.reconciler.Reconcile(tt.ctx, test.NewNullLogger(), tt.client, tt.spec)).To(
Equal(controller.Result{}),
)
tt.expectDaemonSetSemanticallyEqual(ds)
tt.expectOperatorSemanticallyEqual(operator)
tt.expectCiliumInstalledAnnotation()
tt.expectDefaultCNIConfigured(defaultCNIConfiguredCondition("False", anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, v1beta1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades"))
}
type reconcileTest struct {
*WithT
t *testing.T
ctx context.Context
env *envtest.Environment
spec *cluster.Spec
client client.Client
templater *mocks.MockTemplater
reconciler *reconciler.Reconciler
}
func newReconcileTest(t *testing.T) *reconcileTest {
ctrl := gomock.NewController(t)
templater := mocks.NewMockTemplater(ctrl)
tt := &reconcileTest{
WithT: NewWithT(t),
t: t,
ctx: context.Background(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Cilium.Cilium.URI = "cilium:1.10.1-eksa-1"
s.VersionsBundle.Cilium.Operator.URI = "cilium-operator:1.10.1-eksa-1"
s.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
PolicyEnforcementMode: "default",
},
}
}),
client: env.Client(),
env: env,
templater: templater,
reconciler: reconciler.New(templater),
}
t.Cleanup(tt.cleanup)
return tt
}
func (tt *reconcileTest) cleanup() {
tt.Expect(tt.client.DeleteAllOf(tt.ctx, &appsv1.DaemonSet{}, client.InNamespace("kube-system")))
tt.Expect(tt.client.DeleteAllOf(tt.ctx, &appsv1.Deployment{}, client.InNamespace("kube-system")))
tt.Expect(tt.client.DeleteAllOf(tt.ctx, &corev1.ConfigMap{}, client.InNamespace("kube-system")))
tt.Expect(tt.client.DeleteAllOf(tt.ctx, &anywherev1.Cluster{}))
}
func (tt *reconcileTest) withObjects(objs ...client.Object) *reconcileTest {
tt.t.Helper()
envtest.CreateObjs(tt.ctx, tt.t, tt.client, objs...)
return tt
}
func (tt *reconcileTest) expectDSToNotExist(name, namespace string) {
tt.t.Helper()
err := tt.env.APIReader().Get(tt.ctx, types.NamespacedName{Name: name, Namespace: namespace}, &appsv1.DaemonSet{})
tt.Expect(apierrors.IsNotFound(err)).To(BeTrue(), "DaemonSet %s should not exist", name)
}
func (tt *reconcileTest) expectDeploymentToNotExist(name, namespace string) {
tt.t.Helper()
err := tt.env.APIReader().Get(tt.ctx, types.NamespacedName{Name: name, Namespace: namespace}, &appsv1.Deployment{})
tt.Expect(apierrors.IsNotFound(err)).To(BeTrue(), "Deployment %s should not exist", name)
}
func (tt *reconcileTest) getDaemonSet(name, namespace string) *appsv1.DaemonSet {
tt.t.Helper()
ds := &appsv1.DaemonSet{}
tt.Expect(tt.env.APIReader().Get(tt.ctx, types.NamespacedName{Name: name, Namespace: namespace}, ds)).To(Succeed())
return ds
}
func (tt *reconcileTest) getDeployment(name, namespace string) *appsv1.Deployment {
tt.t.Helper()
deployment := &appsv1.Deployment{}
tt.Expect(tt.env.APIReader().Get(tt.ctx, types.NamespacedName{Name: name, Namespace: namespace}, deployment)).To(Succeed())
return deployment
}
func (tt *reconcileTest) getCiliumOperator() *appsv1.Deployment {
tt.t.Helper()
return tt.getDeployment(cilium.DeploymentName, "kube-system")
}
func (tt *reconcileTest) getCiliumDaemonSet() *appsv1.DaemonSet {
tt.t.Helper()
return tt.getDaemonSet(cilium.DaemonSetName, "kube-system")
}
func (tt *reconcileTest) makeCiliumDaemonSetReady() {
tt.t.Helper()
tt.makeDaemonSetReady(cilium.DaemonSetName, "kube-system")
}
func (tt *reconcileTest) makePreflightDaemonSetReady() {
tt.t.Helper()
tt.makeDaemonSetReady(cilium.PreflightDaemonSetName, "kube-system")
}
func (tt *reconcileTest) makeDaemonSetReady(name, namespace string) {
tt.t.Helper()
ds := tt.getDaemonSet(name, namespace)
ds.Status.ObservedGeneration = ds.Generation
tt.Expect(tt.client.Status().Update(tt.ctx, ds)).To(Succeed())
// wait for cache to refresh
r := retrier.New(1*time.Second, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
return true, 50 * time.Millisecond
}))
tt.Expect(
r.Retry(func() error {
ds := &appsv1.DaemonSet{}
tt.Expect(tt.client.Get(tt.ctx, types.NamespacedName{Name: name, Namespace: namespace}, ds)).To(Succeed())
if ds.Status.ObservedGeneration != ds.Generation {
return errors.New("ds cache not updated yet")
}
return nil
}),
).To(Succeed())
}
func (tt *reconcileTest) makePreflightDeploymentReady() {
tt.t.Helper()
tt.makeDeploymentReady(cilium.PreflightDeploymentName, "kube-system")
}
func (tt *reconcileTest) makeDeploymentReady(name, namespace string) {
tt.t.Helper()
deployment := tt.getDeployment(name, namespace)
deployment.Status.ObservedGeneration = deployment.Generation
tt.Expect(tt.client.Status().Update(tt.ctx, deployment)).To(Succeed())
// wait for cache to refresh
r := retrier.New(1*time.Second, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
return true, 50 * time.Millisecond
}))
tt.Expect(
r.Retry(func() error {
deployment := &appsv1.Deployment{}
tt.Expect(tt.client.Get(tt.ctx, types.NamespacedName{Name: name, Namespace: namespace}, deployment)).To(Succeed())
if deployment.Status.ObservedGeneration != deployment.Generation {
return errors.New("deployment cache not updated yet")
}
return nil
}),
).To(Succeed())
}
func (tt *reconcileTest) expectDaemonSetSemanticallyEqual(wantDS *appsv1.DaemonSet) {
tt.t.Helper()
gotDS := tt.getCiliumDaemonSet()
tt.Expect(equality.Semantic.DeepDerivative(wantDS.Spec, gotDS.Spec)).To(
BeTrue(), "Cilium DaemonSet should be semantically equivalent",
)
}
func (tt *reconcileTest) expectOperatorSemanticallyEqual(wantOperator *appsv1.Deployment) {
tt.t.Helper()
gotOperator := tt.getCiliumOperator()
tt.Expect(equality.Semantic.DeepDerivative(wantOperator.Spec, gotOperator.Spec)).To(
BeTrue(), "Cilium Operator should be semantically equivalent",
)
}
func (tt *reconcileTest) expectDefaultCNIConfigured(wantCondition *anywherev1.Condition) {
tt.t.Helper()
condition := conditions.Get(tt.spec.Cluster, anywherev1.DefaultCNIConfiguredCondition)
tt.Expect(condition).ToNot(BeNil(), "missing defaultcniconfigured condition")
tt.Expect(condition).To(conditions.HaveSameStateOf(wantCondition))
}
func (tt *reconcileTest) expectCiliumInstalledAnnotation() {
tt.t.Helper()
if tt.spec.Cluster.Annotations == nil {
tt.t.Fatal("missing cilium installed annotation")
}
if _, ok := tt.spec.Cluster.Annotations[reconciler.EKSACiliumInstalledAnnotation]; !ok {
tt.t.Fatal("missing cilium installed annotation")
}
}
func (tt *reconcileTest) buildManifest(objs ...client.Object) []byte {
tt.t.Helper()
return buildManifest(tt.WithT, objs...)
}
func buildManifest(g *WithT, objs ...client.Object) []byte {
manifests := [][]byte{}
for _, obj := range objs {
o, err := yaml.Marshal(obj)
g.Expect(err).ToNot(HaveOccurred(), "Marshall obj for manifest should succeed")
manifests = append(manifests, o)
}
return templater.AppendYamlResources(manifests...)
}
func ciliumDaemonSet() *appsv1.DaemonSet {
return simpleDaemonSet(cilium.DaemonSetName, "cilium:1.10.1-eksa-1")
}
func ciliumOperator() *appsv1.Deployment {
return simpleDeployment(cilium.DeploymentName, "cilium-operator:1.10.1-eksa-1")
}
func ciliumConfigMap() *corev1.ConfigMap {
return simpleConfigMap(cilium.ConfigMapName, "default")
}
func ciliumPreflightDaemonSet() *appsv1.DaemonSet {
return simpleDaemonSet(cilium.PreflightDaemonSetName, "cilium-pre-flight-check:1.10.1-eksa-1")
}
func ciliumPreflightDeployment() *appsv1.Deployment {
return simpleDeployment(cilium.PreflightDeploymentName, "cilium-pre-flight-check:1.10.1-eksa-1")
}
func simpleDeployment(name, image string) *appsv1.Deployment {
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "kube-system",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "cilium",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "cilium",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: name,
Image: image,
},
},
},
},
},
}
}
func simpleDaemonSet(name, image string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "kube-system",
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "cilium",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "cilium",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: name,
Image: image,
},
},
},
},
},
}
}
func simpleConfigMap(name, enablePolicy string) *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "kube-system",
},
Data: map[string]string{
"enable-policy": enablePolicy,
},
}
}
func defaultCNIConfiguredCondition(status corev1.ConditionStatus, reason string, severity v1beta1.ConditionSeverity, message string) *anywherev1.Condition {
return &anywherev1.Condition{
Type: anywherev1.DefaultCNIConfiguredCondition,
Status: status,
Severity: severity,
Reason: reason,
Message: message,
}
}
| 861 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/cilium/reconciler/reconciler.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
cilium "github.com/aws/eks-anywhere/pkg/networking/cilium"
gomock "github.com/golang/mock/gomock"
)
// MockTemplater is a mock of Templater interface.
type MockTemplater struct {
ctrl *gomock.Controller
recorder *MockTemplaterMockRecorder
}
// MockTemplaterMockRecorder is the mock recorder for MockTemplater.
type MockTemplaterMockRecorder struct {
mock *MockTemplater
}
// NewMockTemplater creates a new mock instance.
func NewMockTemplater(ctrl *gomock.Controller) *MockTemplater {
mock := &MockTemplater{ctrl: ctrl}
mock.recorder = &MockTemplaterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockTemplater) EXPECT() *MockTemplaterMockRecorder {
return m.recorder
}
// GenerateManifest mocks base method.
func (m *MockTemplater) GenerateManifest(ctx context.Context, spec *cluster.Spec, opts ...cilium.ManifestOpt) ([]byte, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, spec}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GenerateManifest", varargs...)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GenerateManifest indicates an expected call of GenerateManifest.
func (mr *MockTemplaterMockRecorder) GenerateManifest(ctx, spec interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, spec}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateManifest", reflect.TypeOf((*MockTemplater)(nil).GenerateManifest), varargs...)
}
// GenerateUpgradePreflightManifest mocks base method.
func (m *MockTemplater) GenerateUpgradePreflightManifest(ctx context.Context, spec *cluster.Spec) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GenerateUpgradePreflightManifest", ctx, spec)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GenerateUpgradePreflightManifest indicates an expected call of GenerateUpgradePreflightManifest.
func (mr *MockTemplaterMockRecorder) GenerateUpgradePreflightManifest(ctx, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateUpgradePreflightManifest", reflect.TypeOf((*MockTemplater)(nil).GenerateUpgradePreflightManifest), ctx, spec)
}
| 73 |
eks-anywhere | aws | Go | package kindnetd
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/types"
)
// InstallerForSpec configures kindnetd for a particular EKS-A cluster.
// It's a stateful version of Installer, with a fixed cluster spec.
type InstallerForSpec struct {
installer *Installer
spec *cluster.Spec
}
// NewInstallerForSpec constructs a new InstallerForSpec.
func NewInstallerForSpec(client Client, reader manifests.FileReader, spec *cluster.Spec) *InstallerForSpec {
return &InstallerForSpec{
installer: NewInstaller(client, reader),
spec: spec,
}
}
// Install installs kindnetd in a cluster.
func (i *InstallerForSpec) Install(ctx context.Context, cluster *types.Cluster) error {
return i.installer.Install(ctx, cluster, i.spec)
}
// Installer allows configuring kindnetd in a cluster.
type Installer struct {
k8s Client
reader manifests.FileReader
}
// NewInstaller constructs a new Installer.
func NewInstaller(client Client, reader manifests.FileReader) *Installer {
return &Installer{
k8s: client,
reader: reader,
}
}
// Install configures kindnetd in an EKS-A cluster.
func (i *Installer) Install(ctx context.Context, cluster *types.Cluster, spec *cluster.Spec) error {
manifest, err := generateManifest(i.reader, spec)
if err != nil {
return fmt.Errorf("generating kindnetd manifest for install: %v", err)
}
if err = i.k8s.ApplyKubeSpecFromBytes(ctx, cluster, manifest); err != nil {
return fmt.Errorf("applying kindnetd manifest for install: %v", err)
}
return nil
}
| 59 |
eks-anywhere | aws | Go | package kindnetd_test
import (
"errors"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/networking/kindnetd"
)
func TestInstallerInstallErrorGeneratingManifest(t *testing.T) {
tt := newKindnetdTest(t)
tt.spec.VersionsBundle.Kindnetd.Manifest.URI = "testdata/missing_manifest.yaml"
tt.Expect(
tt.k.Installer.Install(tt.ctx, tt.cluster, tt.spec),
).To(
MatchError(ContainSubstring("generating kindnetd manifest for install")),
)
}
func TestInstallerInstallErrorApplyingManifest(t *testing.T) {
tt := newKindnetdTest(t)
tt.client.EXPECT().ApplyKubeSpecFromBytes(
tt.ctx,
tt.cluster,
test.MatchFile("testdata/expected_kindnetd_manifest.yaml"),
).Return(errors.New("generating yaml"))
tt.Expect(
tt.k.Installer.Install(tt.ctx, tt.cluster, tt.spec),
).To(
MatchError(ContainSubstring("applying kindnetd manifest for install: generating yaml")),
)
}
func TestInstallerInstallSuccess(t *testing.T) {
tt := newKindnetdTest(t)
tt.client.EXPECT().ApplyKubeSpecFromBytes(
tt.ctx,
tt.cluster,
test.MatchFile("testdata/expected_kindnetd_manifest.yaml"),
)
tt.Expect(
tt.k.Installer.Install(tt.ctx, tt.cluster, tt.spec),
).To(Succeed())
}
func TestInstallForSpecInstallSuccess(t *testing.T) {
tt := newKindnetdTest(t)
installerForSpec := kindnetd.NewInstallerForSpec(tt.client, tt.reader, tt.spec)
tt.client.EXPECT().ApplyKubeSpecFromBytes(
tt.ctx,
tt.cluster,
test.MatchFile("testdata/expected_kindnetd_manifest.yaml"),
)
tt.Expect(
installerForSpec.Install(tt.ctx, tt.cluster),
).To(Succeed())
}
| 65 |
eks-anywhere | aws | Go | package kindnetd
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/types"
)
// Client allows interacting with the Kubernetes API.
type Client interface {
ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
}
// Kindnetd allows installing and upgrading kindnetd in an EKS-A cluster.
type Kindnetd struct {
*Upgrader
*Installer
}
// NewKindnetd constructs a new Kindnetd.
func NewKindnetd(client Client, reader manifests.FileReader) *Kindnetd {
return &Kindnetd{
Installer: NewInstaller(client, reader),
Upgrader: NewUpgrader(client, reader),
}
}
// Install installs the kindnetd CNI in an EKS-A Docker cluster.
func (c *Kindnetd) Install(ctx context.Context, cluster *types.Cluster, spec *cluster.Spec, _ []string) error {
return c.Installer.Install(ctx, cluster, spec)
}
| 34 |
eks-anywhere | aws | Go | package kindnetd_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/networking/kindnetd"
"github.com/aws/eks-anywhere/pkg/networking/kindnetd/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type kindnetdTest struct {
*WithT
ctx context.Context
k *kindnetd.Kindnetd
cluster *types.Cluster
client *mocks.MockClient
reader manifests.FileReader
spec *cluster.Spec
}
func newKindnetdTest(t *testing.T) *kindnetdTest {
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
reader := files.NewReader()
return &kindnetdTest{
WithT: NewWithT(t),
ctx: context.Background(),
client: client,
cluster: &types.Cluster{
Name: "w-cluster",
KubeconfigFile: "config.kubeconfig",
},
reader: reader,
k: kindnetd.NewKindnetd(client, reader),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.1.0/24"}
s.VersionsBundle.Kindnetd = kindnetdBundle
}),
}
}
func TestKindnetdInstallSuccess(t *testing.T) {
tt := newKindnetdTest(t)
tt.client.EXPECT().ApplyKubeSpecFromBytes(
tt.ctx,
tt.cluster,
test.MatchFile("testdata/expected_kindnetd_manifest.yaml"),
)
tt.Expect(tt.k.Install(tt.ctx, tt.cluster, tt.spec, nil)).To(Succeed())
}
var kindnetdBundle = v1alpha1.KindnetdBundle{
Manifest: v1alpha1.Manifest{
URI: "testdata/kindnetd_manifest.yaml",
},
}
| 67 |
eks-anywhere | aws | Go | package kindnetd
import (
"errors"
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/templater"
)
func generateManifest(reader manifests.FileReader, clusterSpec *cluster.Spec) ([]byte, error) {
kindnetdManifest, err := bundles.ReadManifest(reader, clusterSpec.VersionsBundle.Kindnetd.Manifest)
if err != nil {
return nil, fmt.Errorf("can't load kindnetd manifest: %v", err)
}
templates := strings.Split(string(kindnetdManifest.Content), "---")
finalTemplates := make([][]byte, 0, len(templates))
for _, template := range templates {
u := &unstructured.Unstructured{}
if err := yaml.Unmarshal([]byte(template), u); err != nil {
return nil, fmt.Errorf("unmarshaling kindnetd type [%s]: %v", template, err)
}
if u.GetKind() == "DaemonSet" {
updated, err := updatePodSubnet(clusterSpec, u)
if err != nil {
return nil, fmt.Errorf("updating kindnetd pod subnet [%s]: %v", template, err)
}
finalTemplates = append(finalTemplates, updated)
} else {
finalTemplates = append(finalTemplates, []byte(template))
}
}
return templater.AppendYamlResources(finalTemplates...), nil
}
func updatePodSubnet(clusterSpec *cluster.Spec, unstructured *unstructured.Unstructured) ([]byte, error) {
var daemonSet appsv1.DaemonSet
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructured.UnstructuredContent(), &daemonSet); err != nil {
return nil, fmt.Errorf("unmarshaling kindnetd daemonset: %v", err)
}
if len(daemonSet.Spec.Template.Spec.Containers) == 0 {
return nil, errors.New("missing container in kindnetd daemonset")
}
for idx, env := range daemonSet.Spec.Template.Spec.Containers[0].Env {
if env.Name == "POD_SUBNET" {
daemonSet.Spec.Template.Spec.Containers[0].Env[idx].Value = clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks[0]
}
}
return yaml.Marshal(daemonSet)
}
| 59 |
eks-anywhere | aws | Go | package kindnetd
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/types"
)
// Upgrader allows upgrading a kindnetd installation in an EKS-A cluster.
type Upgrader struct {
client Client
reader manifests.FileReader
}
// NewUpgrader constructs a new Upgrader.
func NewUpgrader(client Client, reader manifests.FileReader) *Upgrader {
return &Upgrader{
client: client,
reader: reader,
}
}
// Upgrade configures a kindnetd installation to match the desired state in the cluster Spec.
func (u Upgrader) Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec, namespaces []string) (*types.ChangeDiff, error) {
diff := kindnetdChangeDiff(currentSpec, newSpec)
if diff == nil {
logger.V(1).Info("Nothing to upgrade for Kindnetd")
return nil, nil
}
manifest, err := generateManifest(u.reader, newSpec)
if err != nil {
return nil, err
}
if err := u.client.ApplyKubeSpecFromBytes(ctx, cluster, manifest); err != nil {
return nil, fmt.Errorf("failed applying kindnetd manifest during upgrade: %v", err)
}
return types.NewChangeDiff(diff), nil
}
func kindnetdChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff {
if currentSpec.VersionsBundle.Kindnetd.Version == newSpec.VersionsBundle.Kindnetd.Version {
return nil
}
return &types.ComponentChangeDiff{
ComponentName: "kindnetd",
OldVersion: currentSpec.VersionsBundle.Kindnetd.Version,
NewVersion: newSpec.VersionsBundle.Kindnetd.Version,
}
}
// RunPostControlPlaneUpgradeSetup satisfies the clustermanager.Networking interface.
// It is a noop for kindnetd.
func (u Upgrader) RunPostControlPlaneUpgradeSetup(_ context.Context, _ *types.Cluster) error {
return nil
}
| 64 |
eks-anywhere | aws | Go | package kindnetd_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/networking/kindnetd"
"github.com/aws/eks-anywhere/pkg/types"
)
type upgraderTest struct {
*kindnetdTest
ctx context.Context
u *kindnetd.Upgrader
manifest []byte
currentSpec, newSpec *cluster.Spec
cluster *types.Cluster
wantChangeDiff *types.ChangeDiff
}
func newUpgraderTest(t *testing.T) *upgraderTest {
kt := newKindnetdTest(t)
u := kindnetd.NewUpgrader(kt.client, kt.reader)
return &upgraderTest{
kindnetdTest: kt,
ctx: context.Background(),
u: u,
manifest: []byte(test.ReadFile(t, "testdata/expected_kindnetd_manifest.yaml")),
currentSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.1.0/24"}
s.VersionsBundle.Kindnetd = *kindnetdBundle.DeepCopy()
s.VersionsBundle.Kindnetd.Version = "v1.9.10-eksa.1"
}),
newSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.1.0/24"}
s.VersionsBundle.Kindnetd = *kindnetdBundle.DeepCopy()
s.VersionsBundle.Kindnetd.Version = "v1.9.11-eksa.1"
}),
cluster: &types.Cluster{
KubeconfigFile: "kubeconfig",
},
wantChangeDiff: types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "kindnetd",
OldVersion: "v1.9.10-eksa.1",
NewVersion: "v1.9.11-eksa.1",
}),
}
}
func TestUpgraderUpgradeSuccess(t *testing.T) {
tt := newUpgraderTest(t)
tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, tt.manifest)
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(Equal(tt.wantChangeDiff), "upgrader.Upgrade() should succeed and return correct ChangeDiff")
}
func TestUpgraderUpgradeNotNeeded(t *testing.T) {
tt := newUpgraderTest(t)
tt.currentSpec.VersionsBundle.Kindnetd.Version = "v1.0.0"
tt.newSpec.VersionsBundle.Kindnetd.Version = "v1.0.0"
tt.Expect(tt.u.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec, []string{})).To(BeNil(), "upgrader.Upgrade() should succeed and return nil ChangeDiff")
}
func TestUpgraderRunPostControlPlaneUpgradeSetup(t *testing.T) {
tt := newUpgraderTest(t)
tt.Expect(tt.u.RunPostControlPlaneUpgradeSetup(context.Background(), nil)).To(Succeed())
}
| 73 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/kindnetd/kindnetd.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// ApplyKubeSpecFromBytes mocks base method.
func (m *MockClient) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", ctx, cluster, data)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockClientMockRecorder) ApplyKubeSpecFromBytes(ctx, cluster, data interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockClient)(nil).ApplyKubeSpecFromBytes), ctx, cluster, data)
}
| 51 |
eks-anywhere | aws | Go | package reconciler
import (
"context"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
)
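// CiliumReconciler reconciles the Cilium CNI in a cluster to the state defined in the cluster Spec.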
type CiliumReconciler interface {
Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error)
}
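// Reconciler reconciles the CNI for a cluster by delegating to the CNI-specific reconciler.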
type Reconciler struct {
ciliumReconciler CiliumReconciler
}
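// New builds a CNI Reconciler that delegates Cilium reconciliation to ciliumReconciler.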
func New(ciliumReconciler CiliumReconciler) *Reconciler {
return &Reconciler{
ciliumReconciler: ciliumReconciler,
}
}
// Reconcile takes the specified CNI in a cluster to the desired state defined in a cluster Spec.
// It uses a controller.Result to indicate when requeues are needed.
// Intended to be used in a Kubernetes controller.
// Only the Cilium CNI is supported for now.
func (r *Reconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) {
if spec.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium != nil {
return r.ciliumReconciler.Reconcile(ctx, logger, client, spec)
} else {
return controller.Result{}, errors.New("unsupported CNI, only Cilium is supported at this time")
}
}
| 39 |
eks-anywhere | aws | Go | package reconciler_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/networking/reconciler"
"github.com/aws/eks-anywhere/pkg/networking/reconciler/mocks"
)
func TestReconcilerReconcileCilium(t *testing.T) {
ctx := context.Background()
logger := test.NewNullLogger()
client := fake.NewClientBuilder().Build()
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{},
}
})
g := NewWithT(t)
ctrl := gomock.NewController(t)
ciliumReconciler := mocks.NewMockCiliumReconciler(ctrl)
ciliumReconciler.EXPECT().Reconcile(ctx, logger, client, spec)
r := reconciler.New(ciliumReconciler)
result, err := r.Reconcile(ctx, logger, client, spec)
g.Expect(result).To(Equal(controller.Result{}))
g.Expect(err).NotTo(HaveOccurred())
}
func TestReconcilerReconcileUnsupportedCNI(t *testing.T) {
ctx := context.Background()
logger := test.NewNullLogger()
client := fake.NewClientBuilder().Build()
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{}
})
g := NewWithT(t)
ctrl := gomock.NewController(t)
ciliumReconciler := mocks.NewMockCiliumReconciler(ctrl)
r := reconciler.New(ciliumReconciler)
_, err := r.Reconcile(ctx, logger, client, spec)
g.Expect(err).To(MatchError(ContainSubstring("unsupported CNI, only Cilium is supported at this time")))
}
| 56 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networking/reconciler/reconciler.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
controller "github.com/aws/eks-anywhere/pkg/controller"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockCiliumReconciler is a mock of CiliumReconciler interface.
type MockCiliumReconciler struct {
ctrl *gomock.Controller
recorder *MockCiliumReconcilerMockRecorder
}
// MockCiliumReconcilerMockRecorder is the mock recorder for MockCiliumReconciler.
type MockCiliumReconcilerMockRecorder struct {
mock *MockCiliumReconciler
}
// NewMockCiliumReconciler creates a new mock instance.
func NewMockCiliumReconciler(ctrl *gomock.Controller) *MockCiliumReconciler {
mock := &MockCiliumReconciler{ctrl: ctrl}
mock.recorder = &MockCiliumReconcilerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCiliumReconciler) EXPECT() *MockCiliumReconcilerMockRecorder {
return m.recorder
}
// Reconcile mocks base method.
func (m *MockCiliumReconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", ctx, logger, client, spec)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockCiliumReconcilerMockRecorder) Reconcile(ctx, logger, client, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCiliumReconciler)(nil).Reconcile), ctx, logger, client, spec)
}
| 55 |
eks-anywhere | aws | Go | package networkutils
import (
"fmt"
"math/rand"
"net"
"time"
)
type IPGenerator struct {
netClient NetClient
rand *rand.Rand
}
func NewIPGenerator(netClient NetClient) IPGenerator {
return IPGenerator{
netClient: netClient,
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
}
}
// GenerateUniqueIP returns a random IP within the given CIDR block that does not appear to be in use.
func (ipgen IPGenerator) GenerateUniqueIP(cidrBlock string) (string, error) {
_, cidr, err := net.ParseCIDR(cidrBlock)
if err != nil {
return "", err
}
uniqueIp, err := ipgen.randIp(cidr)
if err != nil {
return "", err
}
for IsIPInUse(ipgen.netClient, uniqueIp.String()) {
uniqueIp, err = ipgen.randIp(cidr)
if err != nil {
return "", err
}
}
return uniqueIp.String(), nil
}
// randIp generates a random IPv4 address within the specified CIDR block.
func (ipgen IPGenerator) randIp(cidr *net.IPNet) (net.IP, error) {
newIp := make(net.IP, 0, 4)
for i := 0; i < 4; i++ {
// Keep the network bits from the CIDR and randomize the host bits.
newIp = append(newIp, byte(ipgen.rand.Intn(255))&^cidr.Mask[i]|cidr.IP[i])
}
}
if !cidr.Contains(newIp) {
return nil, fmt.Errorf("random IP generation failed")
}
return newIp, nil
}
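// exampleGenerateUniqueIP is an illustrative sketch (not part of the original source) showing
// how GenerateUniqueIP might be combined with the package's DefaultNetClient to pick an
// apparently free address from a CIDR block. The CIDR value is an assumption for demonstration only.
func exampleGenerateUniqueIP() (string, error) {
    ipgen := NewIPGenerator(&DefaultNetClient{})
    // Retries internally until it finds an address that does not appear to be in use.
    return ipgen.GenerateUniqueIP("192.168.0.0/24")
}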
| 51 |
eks-anywhere | aws | Go | package networkutils_test
import (
"errors"
"net"
"testing"
"time"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
type DummyNetClient struct{}
func (n *DummyNetClient) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
// add dummy case for coverage
if address == "255.255.255.255:22" {
return &net.IPConn{}, nil
}
return nil, errors.New("")
}
func TestGenerateUniqueIP(t *testing.T) {
cidrBlock := "1.2.3.4/16"
ipgen := networkutils.NewIPGenerator(&DummyNetClient{})
ip, err := ipgen.GenerateUniqueIP(cidrBlock)
if err != nil {
t.Fatalf("GenerateUniqueIP() ip = %v error: %v", ip, err)
}
}
| 31 |
eks-anywhere | aws | Go | package networkutils
import (
"errors"
"fmt"
"os"
"strings"
)
type IPPool []string
func NewIPPool() IPPool {
return IPPool{}
}
func NewIPPoolFromString(fromString string) IPPool {
return IPPool(strings.Split(fromString, ","))
}
func NewIPPoolFromEnv(ipPoolEnvVar string) (IPPool, error) {
value, ok := os.LookupEnv(ipPoolEnvVar)
if !ok {
return NewIPPool(), fmt.Errorf("%s environment ip pool does not exist", ipPoolEnvVar)
}
if value != "" {
return NewIPPoolFromString(value), nil
}
return NewIPPool(), nil
}
func (ipPool *IPPool) ToString() string {
return strings.Join(*ipPool, ",")
}
func (ipPool *IPPool) IsEmpty() bool {
return len(*ipPool) == 0
}
func (ipPool *IPPool) AddIP(ip string) {
*ipPool = append(*ipPool, ip)
}
func (ipPool *IPPool) PopIP() (string, error) {
if ipPool.IsEmpty() {
return "", errors.New("ip pool is empty")
} else {
index := len(*ipPool) - 1
ip := (*ipPool)[index]
*ipPool = (*ipPool)[:index]
return ip, nil
}
}
func (ipPool *IPPool) ToEnvVar(envVarName string) error {
s := ipPool.ToString()
err := os.Setenv(envVarName, s)
if err != nil {
return fmt.Errorf("failed to set the ip pool env var %s to value %s", envVarName, s)
}
return nil
}
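// exampleIPPoolRoundTrip is an illustrative sketch (not part of the original source) showing how
// an IPPool can be built, persisted to an environment variable, and read back. The variable name
// and addresses are assumptions for demonstration only.
func exampleIPPoolRoundTrip() (string, error) {
    pool := NewIPPool()
    pool.AddIP("10.0.0.10")
    pool.AddIP("10.0.0.11")
    if err := pool.ToEnvVar("T_EXAMPLE_IP_POOL"); err != nil {
        return "", err
    }
    restored, err := NewIPPoolFromEnv("T_EXAMPLE_IP_POOL")
    if err != nil {
        return "", err
    }
    // PopIP removes and returns the most recently added address ("10.0.0.11" here).
    return restored.PopIP()
}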
| 62 |
eks-anywhere | aws | Go | package networkutils
import (
"net"
"time"
)
type NetClient interface {
DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
}
type DefaultNetClient struct{}
func (n *DefaultNetClient) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout(network, address, timeout)
}
| 17 |
eks-anywhere | aws | Go | package networkutils
import (
"errors"
"fmt"
"net"
"strconv"
"syscall"
"time"
)
func IsPortValid(port string) bool {
p, err := strconv.Atoi(port)
return err == nil && p >= 1 && p <= 65535
}
func ValidateIP(ip string) error {
if ip == "" {
return fmt.Errorf("is required")
}
parsedIp := net.ParseIP(ip)
if parsedIp == nil {
return fmt.Errorf("is invalid: %s", ip)
}
return nil
}
// IsIPInUse performs a best effort check to see if an IP address is in use. It is not completely
// reliable as testing if an IP is in use is inherently difficult, particularly with non-trivial
// network topologies.
func IsIPInUse(client NetClient, ip string) bool {
// Dial and immediately close the connection if it was established, as it's superfluous for
// our check. We use port 80 as it's common and more likely to get through firewalls
// than other ports.
conn, err := client.DialTimeout("tcp", net.JoinHostPort(ip, "80"), 500*time.Millisecond)
if err == nil {
conn.Close()
}
// If we establish a connection or we receive a response assume that address is in use.
// The latter case covers situations like an IP in use but the port requested is not open.
return err == nil || errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, syscall.ECONNRESET)
}
func IsPortInUse(client NetClient, host string, port string) bool {
address := net.JoinHostPort(host, port)
conn, err := client.DialTimeout("tcp", address, 500*time.Millisecond)
if err == nil {
conn.Close()
return true
}
return false
}
func GetLocalIP() (net.IP, error) {
conn, err := net.Dial("udp", "1.2.3.4:80")
if err != nil {
return nil, fmt.Errorf("failed to retrieve local ip: %v", err)
}
defer conn.Close()
return conn.LocalAddr().(*net.UDPAddr).IP, nil
}
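// exampleEndpointChecks is an illustrative sketch (not part of the original source) showing how
// the helpers in this file might be combined to vet a host and port before use. The IP and port
// values, and the error wording, are assumptions for demonstration only.
func exampleEndpointChecks(client NetClient) error {
    ip, port := "10.10.10.10", "6443"
    if err := ValidateIP(ip); err != nil {
        return fmt.Errorf("endpoint ip %v", err)
    }
    if !IsPortValid(port) {
        return fmt.Errorf("endpoint port %s is invalid", port)
    }
    // Best-effort checks only: a negative result does not guarantee the address or port is free.
    if IsIPInUse(client, ip) {
        return fmt.Errorf("ip %s appears to be in use", ip)
    }
    if IsPortInUse(client, ip, port) {
        return fmt.Errorf("port %s on %s appears to be in use", port, ip)
    }
    return nil
}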
| 66 |
eks-anywhere | aws | Go | package networkutils_test
import (
"errors"
"net"
"reflect"
"syscall"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/pkg/networkutils/mocks"
)
var (
validPorts = []string{"443", "8080", "32000"}
invalidPorts = []string{"", "443a", "abc", "0", "123456"}
)
func TestIsPortValidExpectValid(t *testing.T) {
for _, port := range validPorts {
if !networkutils.IsPortValid(port) {
t.Fatalf("Expected port %s to be valid", port)
}
}
}
func TestIsPortValidExpectInvalid(t *testing.T) {
for _, port := range invalidPorts {
if networkutils.IsPortValid(port) {
t.Fatalf("Expected port %s to be invalid", port)
}
}
}
func TestIsIPInUsePass(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
client := mocks.NewMockNetClient(ctrl)
client.EXPECT().DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, errors.New("no connection"))
res := networkutils.IsIPInUse(client, "10.10.10.10")
g.Expect(res).To(gomega.BeFalse())
}
func TestIsIPInUseConnectionRefused(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
client := mocks.NewMockNetClient(ctrl)
client.EXPECT().DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, syscall.ECONNREFUSED)
res := networkutils.IsIPInUse(client, "10.10.10.10")
g.Expect(res).To(gomega.BeTrue())
}
func TestIsIPInUseFail(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
conn := NewMockConn(ctrl)
conn.EXPECT().Close().Return(nil)
client := mocks.NewMockNetClient(ctrl)
client.EXPECT().DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(conn, nil)
res := networkutils.IsIPInUse(client, "10.10.10.10")
g.Expect(res).To(gomega.BeTrue())
}
func TestIsPortInUsePass(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
client := mocks.NewMockNetClient(ctrl)
client.EXPECT().DialTimeout("tcp", "10.10.10.10:80", 500*time.Millisecond).
Return(nil, errors.New("no connection"))
res := networkutils.IsPortInUse(client, "10.10.10.10", "80")
g.Expect(res).To(gomega.BeFalse())
}
func TestIsPortInUseFail(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
conn := NewMockConn(ctrl)
conn.EXPECT().Close().Return(nil)
client := mocks.NewMockNetClient(ctrl)
client.EXPECT().DialTimeout("tcp", "10.10.10.10:80", 500*time.Millisecond).
Return(conn, nil)
res := networkutils.IsPortInUse(client, "10.10.10.10", "80")
g.Expect(res).To(gomega.BeTrue())
}
func TestGetLocalIP(t *testing.T) {
_, err := networkutils.GetLocalIP()
if err != nil {
t.Fatalf("unable to get local IP: %v", err)
}
}
// MockConn is a hand-written mock of the net.Conn interface.
type MockConn struct {
ctrl *gomock.Controller
recorder *MockConnMockRecorder
}
var _ net.Conn = &MockConn{}
// MockConnMockRecorder is the mock recorder for MockConn.
type MockConnMockRecorder struct {
mock *MockConn
}
// NewMockConn creates a new mock instance.
func NewMockConn(ctrl *gomock.Controller) *MockConn {
mock := &MockConn{ctrl: ctrl}
mock.recorder = &MockConnMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockConn) EXPECT() *MockConnMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockConn) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
func (m *MockConn) Read(b []byte) (n int, err error) { panic("unimplemented") }
func (m *MockConn) Write(b []byte) (n int, err error) { panic("unimplemented") }
func (m *MockConn) LocalAddr() net.Addr { panic("unimplemented") }
func (m *MockConn) RemoteAddr() net.Addr { panic("unimplemented") }
func (m *MockConn) SetDeadline(t time.Time) error { panic("unimplemented") }
func (m *MockConn) SetReadDeadline(t time.Time) error { panic("unimplemented") }
func (m *MockConn) SetWriteDeadline(t time.Time) error { panic("unimplemented") }
func (mr *MockConnMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConn)(nil).Close))
}
| 157 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/networkutils/netclient.go
// Package mocks is a generated GoMock package.
package mocks
import (
net "net"
reflect "reflect"
time "time"
gomock "github.com/golang/mock/gomock"
)
// MockNetClient is a mock of NetClient interface.
type MockNetClient struct {
ctrl *gomock.Controller
recorder *MockNetClientMockRecorder
}
// MockNetClientMockRecorder is the mock recorder for MockNetClient.
type MockNetClientMockRecorder struct {
mock *MockNetClient
}
// NewMockNetClient creates a new mock instance.
func NewMockNetClient(ctrl *gomock.Controller) *MockNetClient {
mock := &MockNetClient{ctrl: ctrl}
mock.recorder = &MockNetClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockNetClient) EXPECT() *MockNetClientMockRecorder {
return m.recorder
}
// DialTimeout mocks base method.
func (m *MockNetClient) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DialTimeout", network, address, timeout)
ret0, _ := ret[0].(net.Conn)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DialTimeout indicates an expected call of DialTimeout.
func (mr *MockNetClientMockRecorder) DialTimeout(network, address, timeout interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialTimeout", reflect.TypeOf((*MockNetClient)(nil).DialTimeout), network, address, timeout)
}
| 52 |
eks-anywhere | aws | Go | package providers
// ConfigsMapToSlice flattens a map of machine configs into a slice of its values.
func ConfigsMapToSlice(c map[string]MachineConfig) []MachineConfig {
configs := make([]MachineConfig, 0, len(c))
for _, config := range c {
configs = append(configs, config)
}
return configs
}
| 11 |
eks-anywhere | aws | Go | package providers
import (
"fmt"
)
const (
EtcdNodeNameSuffix = "etcd"
ControlPlaneNodeNameSuffix = "cp"
)
// GetControlPlaneNodeName returns the control plane node name derived from the cluster name.
func GetControlPlaneNodeName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, ControlPlaneNodeNameSuffix)
}
// GetEtcdNodeName returns the etcd node name derived from the cluster name.
func GetEtcdNodeName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, EtcdNodeNameSuffix)
}
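// exampleNodeNames is an illustrative sketch (not part of the original source): for a cluster
// named "prod" (an assumed name), these helpers yield "prod-cp" and "prod-etcd" respectively.
func exampleNodeNames() (string, string) {
    return GetControlPlaneNodeName("prod"), GetEtcdNodeName("prod")
}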
| 19 |
eks-anywhere | aws | Go | package providers
import (
"context"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/types"
)
type Provider interface {
Name() string
SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error
SetupAndValidateDeleteCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, currentSpec *cluster.Spec) error
UpdateSecrets(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
GenerateCAPISpecForCreate(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error)
GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error)
// PreCAPIInstallOnBootstrap is called after the bootstrap cluster is set up but before CAPI resources are installed on it. This allows us to do provider-specific configuration on the bootstrap cluster.
PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error
PostBootstrapDeleteForUpgrade(ctx context.Context) error
PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error
// PostWorkloadInit is called after the workload cluster is created and initialized with a CNI. This allows us to do provider-specific configuration on the workload cluster.
PostWorkloadInit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
BootstrapClusterOpts(clusterSpec *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error)
UpdateKubeConfig(content *[]byte, clusterName string) error
Version(clusterSpec *cluster.Spec) string
EnvMap(clusterSpec *cluster.Spec) (map[string]string, error)
GetDeployments() map[string][]string
GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle
DatacenterConfig(clusterSpec *cluster.Spec) DatacenterConfig
DatacenterResourceType() string
MachineResourceType() string
MachineConfigs(clusterSpec *cluster.Spec) []MachineConfig
ValidateNewSpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff
RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpec *cluster.Spec, clusterSpec *cluster.Spec, workloadCluster *types.Cluster, managementCluster *types.Cluster) error
UpgradeNeeded(ctx context.Context, newSpec, currentSpec *cluster.Spec, cluster *types.Cluster) (bool, error)
DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error
InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error
PostClusterDeleteValidate(ctx context.Context, managementCluster *types.Cluster) error
// PostMoveManagementToBootstrap is called after the CAPI management is moved back to the bootstrap cluster.
PostMoveManagementToBootstrap(ctx context.Context, bootstrapCluster *types.Cluster) error
PreCoreComponentsUpgrade(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
}
type DatacenterConfig interface {
Kind() string
PauseReconcile()
ClearPauseAnnotation()
Marshallable() v1alpha1.Marshallable
}
type BuildMapOption func(map[string]interface{})
type TemplateBuilder interface {
GenerateCAPISpecControlPlane(clusterSpec *cluster.Spec, buildOptions ...BuildMapOption) (content []byte, err error)
GenerateCAPISpecWorkers(clusterSpec *cluster.Spec, workloadTemplateNames, kubeadmconfigTemplateNames map[string]string) (content []byte, err error)
}
type MachineConfig interface {
OSFamily() v1alpha1.OSFamily
Marshallable() v1alpha1.Marshallable
GetNamespace() string
GetName() string
}
| 69 |
eks-anywhere | aws | Go | package cloudstack
import (
"fmt"
"sort"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
)
// CloudStackMachineTemplateKind defines the K8s Kind corresponding with the MachineTemplate.
const CloudStackMachineTemplateKind = "CloudStackMachineTemplate"
func machineDeployment(clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration, kubeadmConfigTemplate *bootstrapv1.KubeadmConfigTemplate, cloudstackMachineTemplate *cloudstackv1.CloudStackMachineTemplate) clusterv1.MachineDeployment {
return *clusterapi.MachineDeployment(clusterSpec, workerNodeGroupConfig, kubeadmConfigTemplate, cloudstackMachineTemplate)
}
// MachineDeployments returns generated CAPI MachineDeployment objects for a given cluster spec.
func MachineDeployments(clusterSpec *cluster.Spec, kubeadmConfigTemplates map[string]*bootstrapv1.KubeadmConfigTemplate, machineTemplates map[string]*cloudstackv1.CloudStackMachineTemplate) map[string]*clusterv1.MachineDeployment {
m := make(map[string]*clusterv1.MachineDeployment, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfig := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
deployment := machineDeployment(clusterSpec, workerNodeGroupConfig,
kubeadmConfigTemplates[workerNodeGroupConfig.Name],
machineTemplates[workerNodeGroupConfig.Name],
)
m[workerNodeGroupConfig.Name] = &deployment
}
return m
}
func generateMachineTemplateAnnotations(machineConfig *v1alpha1.CloudStackMachineConfigSpec) map[string]string {
annotations := make(map[string]string, 0)
if machineConfig.DiskOffering != nil {
annotations[fmt.Sprintf("mountpath.diskoffering.%s", constants.CloudstackAnnotationSuffix)] = machineConfig.DiskOffering.MountPath
annotations[fmt.Sprintf("device.diskoffering.%s", constants.CloudstackAnnotationSuffix)] = machineConfig.DiskOffering.Device
annotations[fmt.Sprintf("filesystem.diskoffering.%s", constants.CloudstackAnnotationSuffix)] = machineConfig.DiskOffering.Filesystem
annotations[fmt.Sprintf("label.diskoffering.%s", constants.CloudstackAnnotationSuffix)] = machineConfig.DiskOffering.Label
}
if machineConfig.Symlinks != nil {
links := make([]string, 0)
for key := range machineConfig.Symlinks {
links = append(links, fmt.Sprintf("%s:%s", key, machineConfig.Symlinks[key]))
}
// sorting for unit test determinism
sort.Strings(links)
annotations[fmt.Sprintf("symlinks.%s", constants.CloudstackAnnotationSuffix)] = strings.Join(links, ",")
}
return annotations
}
func setDiskOffering(machineConfig *v1alpha1.CloudStackMachineConfigSpec, template *cloudstackv1.CloudStackMachineTemplate) {
if machineConfig.DiskOffering == nil {
return
}
template.Spec.Spec.Spec.DiskOffering = cloudstackv1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: cloudstackv1.CloudStackResourceIdentifier{
ID: machineConfig.DiskOffering.Id,
Name: machineConfig.DiskOffering.Name,
},
CustomSize: machineConfig.DiskOffering.CustomSize,
MountPath: machineConfig.DiskOffering.MountPath,
Device: machineConfig.DiskOffering.Device,
Filesystem: machineConfig.DiskOffering.Filesystem,
Label: machineConfig.DiskOffering.Label,
}
}
// MachineTemplate returns a generated CloudStackMachineTemplate object for a given EKS-A CloudStackMachineConfig.
func MachineTemplate(name string, machineConfig *v1alpha1.CloudStackMachineConfigSpec) *cloudstackv1.CloudStackMachineTemplate {
template := &cloudstackv1.CloudStackMachineTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: cloudstackv1.GroupVersion.String(),
Kind: CloudStackMachineTemplateKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.EksaSystemNamespace,
Annotations: generateMachineTemplateAnnotations(machineConfig),
},
Spec: cloudstackv1.CloudStackMachineTemplateSpec{
Spec: cloudstackv1.CloudStackMachineTemplateResource{
Spec: cloudstackv1.CloudStackMachineSpec{
Details: machineConfig.UserCustomDetails,
Offering: cloudstackv1.CloudStackResourceIdentifier{
ID: machineConfig.ComputeOffering.Id,
Name: machineConfig.ComputeOffering.Name,
},
Template: cloudstackv1.CloudStackResourceIdentifier{
ID: machineConfig.Template.Id,
Name: machineConfig.Template.Name,
},
AffinityGroupIDs: machineConfig.AffinityGroupIds,
Affinity: machineConfig.Affinity,
},
},
},
}
setDiskOffering(machineConfig, template)
return template
}
| 114 |
eks-anywhere | aws | Go | package cloudstack_test
import (
"fmt"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)
type apiBuilderTest struct {
*WithT
machineConfig *v1alpha1.CloudStackMachineConfigSpec
}
func newAPIBuilderTest(t *testing.T) apiBuilderTest {
return apiBuilderTest{
WithT: NewWithT(t),
machineConfig: givenMachineConfig(),
}
}
const (
testMountPath = "testMountPath"
testDevice = "testDevice"
testFilesystem = "testFilesystem"
testLabel = "testLabel"
testDiskSize = 5
computeOfferingID = "computeOfferingID"
computeOfferingName = "computeOfferingName"
diskOfferingID = "diskOfferingID"
diskOfferingName = "diskOfferingName"
templateID = "templateID"
templateName = "templateName"
proAffinity = "pro"
)
var (
affinityGroupIds = []string{"ag1", "ag2"}
testSymLinks = map[string]string{
"sym": "link",
}
testSymLinksString = "sym:link"
testDetails = map[string]string{
"user": "details",
}
)
func givenMachineConfig() *v1alpha1.CloudStackMachineConfigSpec {
return &v1alpha1.CloudStackMachineConfigSpec{
ComputeOffering: v1alpha1.CloudStackResourceIdentifier{
Id: computeOfferingID,
Name: computeOfferingName,
},
DiskOffering: &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: diskOfferingName,
Id: diskOfferingID,
},
CustomSize: testDiskSize,
MountPath: testMountPath,
Device: testDevice,
Filesystem: testFilesystem,
Label: testLabel,
},
Template: v1alpha1.CloudStackResourceIdentifier{
Id: templateID,
Name: templateName,
},
Symlinks: testSymLinks,
UserCustomDetails: testDetails,
AffinityGroupIds: affinityGroupIds,
Affinity: proAffinity,
}
}
func fullCloudStackMachineTemplate() *cloudstackv1.CloudStackMachineTemplate {
return &cloudstackv1.CloudStackMachineTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: cloudstackv1.GroupVersion.String(),
Kind: cloudstack.CloudStackMachineTemplateKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: "cloudstack-test-md-0-1",
Namespace: "eksa-system",
Annotations: map[string]string{
fmt.Sprintf("mountpath.diskoffering.%s", constants.CloudstackAnnotationSuffix): testMountPath,
fmt.Sprintf("device.diskoffering.%s", constants.CloudstackAnnotationSuffix): testDevice,
fmt.Sprintf("filesystem.diskoffering.%s", constants.CloudstackAnnotationSuffix): testFilesystem,
fmt.Sprintf("label.diskoffering.%s", constants.CloudstackAnnotationSuffix): testLabel,
fmt.Sprintf("symlinks.%s", constants.CloudstackAnnotationSuffix): testSymLinksString,
},
},
Spec: cloudstackv1.CloudStackMachineTemplateSpec{
Spec: cloudstackv1.CloudStackMachineTemplateResource{
Spec: cloudstackv1.CloudStackMachineSpec{
Details: testDetails,
Offering: cloudstackv1.CloudStackResourceIdentifier{
ID: computeOfferingID,
Name: computeOfferingName,
},
Template: cloudstackv1.CloudStackResourceIdentifier{
ID: templateID,
Name: templateName,
},
AffinityGroupIDs: affinityGroupIds,
Affinity: proAffinity,
DiskOffering: cloudstackv1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: cloudstackv1.CloudStackResourceIdentifier{
ID: diskOfferingID,
Name: diskOfferingName,
},
CustomSize: testDiskSize,
MountPath: testMountPath,
Device: testDevice,
Filesystem: testFilesystem,
Label: testLabel,
},
},
},
},
}
}
func TestFullCloudStackMachineTemplate(t *testing.T) {
tt := newAPIBuilderTest(t)
got := cloudstack.MachineTemplate("cloudstack-test-control-plane-1", tt.machineConfig)
want := fullCloudStackMachineTemplate()
tt.Expect(got.Spec.Spec.Spec).To(Equal(want.Spec.Spec.Spec))
tt.Expect(got.Annotations).To(Equal(want.Annotations))
}
func TestBasicCloudStackMachineDeployment(t *testing.T) {
tt := newAPIBuilderTest(t)
count := 1
workerNodeGroupConfig := v1alpha1.WorkerNodeGroupConfiguration{
Name: "test-worker-node-group",
Count: &count,
}
kubeadmConfigTemplates := map[string]*bootstrapv1.KubeadmConfigTemplate{
workerNodeGroupConfig.Name: {
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: bootstrapv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubeadmConfigTemplate",
},
},
}
fullMachineTemplate := fullCloudStackMachineTemplate()
machineTemplates := map[string]*cloudstackv1.CloudStackMachineTemplate{
workerNodeGroupConfig.Name: fullMachineTemplate,
}
spec := &cluster.Spec{
VersionsBundle: &cluster.VersionsBundle{
KubeDistro: &cluster.KubeDistro{
Kubernetes: cluster.VersionedRepository{
Tag: "eksd-tag",
},
},
},
Config: &cluster.Config{
Cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{
workerNodeGroupConfig,
},
},
},
},
}
got := cloudstack.MachineDeployments(spec, kubeadmConfigTemplates, machineTemplates)
tt.Expect(len(got)).To(Equal(*workerNodeGroupConfig.Count))
tt.Expect(int(*got[workerNodeGroupConfig.Name].Spec.Replicas)).To(Equal(*workerNodeGroupConfig.Count))
tt.Expect(got[workerNodeGroupConfig.Name].Spec.Template.Spec.InfrastructureRef.Name).To(Equal(fullMachineTemplate.Name))
tt.Expect(got[workerNodeGroupConfig.Name].Spec.Template.Spec.InfrastructureRef.Kind).To(Equal(cloudstack.CloudStackMachineTemplateKind))
tt.Expect(got[workerNodeGroupConfig.Name].Spec.Template.Spec.InfrastructureRef.APIVersion).To(Equal(cloudstackv1.GroupVersion.String()))
}
| 187 |
eks-anywhere | aws | Go | package cloudstack
import (
"context"
_ "embed"
"errors"
"fmt"
"net/url"
"os"
"strconv"
etcdv1beta1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/providers/common"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
eksaLicense = "EKSA_LICENSE"
etcdTemplateNameKey = "etcdTemplateName"
cpTemplateNameKey = "controlPlaneTemplateName"
)
//go:embed config/template-cp.yaml
var defaultCAPIConfigCP string
//go:embed config/template-md.yaml
var defaultClusterConfigMD string
var requiredEnvs = []string{decoder.CloudStackCloudConfigB64SecretKey}
var (
eksaCloudStackDatacenterResourceType = fmt.Sprintf("cloudstackdatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaCloudStackMachineResourceType = fmt.Sprintf("cloudstackmachineconfigs.%s", v1alpha1.GroupVersion.Group)
)
type cloudstackProvider struct {
datacenterConfig *v1alpha1.CloudStackDatacenterConfig
clusterConfig *v1alpha1.Cluster
providerKubectlClient ProviderKubectlClient
writer filewriter.FileWriter
selfSigned bool
templateBuilder *TemplateBuilder
validator ProviderValidator
execConfig *decoder.CloudStackExecConfig
log logr.Logger
}
func (p *cloudstackProvider) PreBootstrapSetup(ctx context.Context, cluster *types.Cluster) error {
return nil
}
func (p *cloudstackProvider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
p.log.Info("Installing secrets on bootstrap cluster")
return p.UpdateSecrets(ctx, cluster, nil)
}
func (p *cloudstackProvider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}
func (p *cloudstackProvider) PostBootstrapDeleteForUpgrade(ctx context.Context) error {
return nil
}
func (p *cloudstackProvider) PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}
func (p *cloudstackProvider) PostWorkloadInit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
return nil
}
func (p *cloudstackProvider) UpdateSecrets(ctx context.Context, cluster *types.Cluster, _ *cluster.Spec) error {
contents, err := p.generateSecrets(ctx, cluster)
if err != nil {
return fmt.Errorf("creating secrets object: %v", err)
}
if len(contents) > 0 {
if err := p.providerKubectlClient.ApplyKubeSpecFromBytes(ctx, cluster, contents); err != nil {
return fmt.Errorf("applying secrets object: %v", err)
}
}
return nil
}
func (p *cloudstackProvider) generateSecrets(ctx context.Context, cluster *types.Cluster) ([]byte, error) {
secrets := [][]byte{}
for _, profile := range p.execConfig.Profiles {
_, err := p.providerKubectlClient.GetSecretFromNamespace(ctx, cluster.KubeconfigFile, profile.Name, constants.EksaSystemNamespace)
if err == nil {
// When a secret already exists with the profile name we skip creating it
continue
}
if !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("getting secret for profile %s: %v", profile.Name, err)
}
bytes, err := yaml.Marshal(generateSecret(profile))
if err != nil {
return nil, fmt.Errorf("marshalling secret for profile %s: %v", profile.Name, err)
}
secrets = append(secrets, bytes)
}
return templater.AppendYamlResources(secrets...), nil
}
func generateSecret(profile decoder.CloudStackProfileConfig) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.Version,
},
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: profile.Name,
},
StringData: map[string]string{
decoder.APIUrlKey: profile.ManagementUrl,
decoder.APIKeyKey: profile.ApiKey,
decoder.SecretKeyKey: profile.SecretKey,
decoder.VerifySslKey: profile.VerifySsl,
},
}
}
func machineRefSliceToMap(machineRefs []v1alpha1.Ref) map[string]v1alpha1.Ref {
refMap := make(map[string]v1alpha1.Ref, len(machineRefs))
for _, ref := range machineRefs {
refMap[ref.Name] = ref
}
return refMap
}
func (p *cloudstackProvider) validateMachineConfigImmutability(ctx context.Context, cluster *types.Cluster, newConfig *v1alpha1.CloudStackMachineConfig, clusterSpec *cluster.Spec) error {
prevMachineConfig, err := p.providerKubectlClient.GetEksaCloudStackMachineConfig(ctx, newConfig.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
err = newConfig.ValidateUpdate(prevMachineConfig)
if err != nil {
return err
}
return nil
}
func (p *cloudstackProvider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
prevSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name)
if err != nil {
return err
}
prevDatacenter, err := p.providerKubectlClient.GetEksaCloudStackDatacenterConfig(ctx, prevSpec.Spec.DatacenterRef.Name, cluster.KubeconfigFile, prevSpec.Namespace)
if err != nil {
return err
}
prevDatacenter.SetDefaults()
if err = clusterSpec.CloudStackDatacenter.ValidateUpdate(prevDatacenter); err != nil {
return err
}
prevMachineConfigRefs := machineRefSliceToMap(prevSpec.MachineConfigRefs())
for _, machineConfigRef := range clusterSpec.Cluster.MachineConfigRefs() {
machineConfig, ok := clusterSpec.CloudStackMachineConfigs[machineConfigRef.Name]
if !ok {
return fmt.Errorf("cannot find machine config %s in cloudstack provider machine configs", machineConfigRef.Name)
}
if _, ok = prevMachineConfigRefs[machineConfig.Name]; ok {
err = p.validateMachineConfigImmutability(ctx, cluster, machineConfig, clusterSpec)
if err != nil {
return err
}
}
}
return nil
}
func (p *cloudstackProvider) ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff {
if currentSpec.VersionsBundle.CloudStack.Version == newSpec.VersionsBundle.CloudStack.Version {
return nil
}
return &types.ComponentChangeDiff{
ComponentName: constants.CloudStackProviderName,
NewVersion: newSpec.VersionsBundle.CloudStack.Version,
OldVersion: currentSpec.VersionsBundle.CloudStack.Version,
}
}
func (p *cloudstackProvider) RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpec *cluster.Spec, clusterSpec *cluster.Spec, workloadCluster *types.Cluster, managementCluster *types.Cluster) error {
// Nothing to do
return nil
}
type ProviderKubectlClient interface {
ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error
LoadSecret(ctx context.Context, secretObject string, secretObjType string, secretObjectName string, kubeConfFile string) error
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
GetEksaCloudStackDatacenterConfig(ctx context.Context, cloudstackDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackDatacenterConfig, error)
GetEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackMachineConfig, error)
GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*kubeadmv1beta1.KubeadmControlPlane, error)
GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*etcdv1beta1.EtcdadmCluster, error)
GetSecretFromNamespace(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.Secret, error)
UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error
SearchCloudStackMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.CloudStackMachineConfig, error)
SearchCloudStackDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.CloudStackDatacenterConfig, error)
DeleteEksaCloudStackDatacenterConfig(ctx context.Context, cloudstackDatacenterConfigName string, kubeconfigFile string, namespace string) error
DeleteEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) error
SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error
}
// NewProvider initializes the CloudStack provider object.
func NewProvider(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, validator ProviderValidator, writer filewriter.FileWriter, now types.NowFunc, log logr.Logger) *cloudstackProvider { //nolint:revive
return &cloudstackProvider{
datacenterConfig: datacenterConfig,
clusterConfig: clusterConfig,
providerKubectlClient: providerKubectlClient,
writer: writer,
selfSigned: false,
templateBuilder: NewTemplateBuilder(now),
log: log,
validator: validator,
}
}
func (p *cloudstackProvider) UpdateKubeConfig(_ *[]byte, _ string) error {
// customize generated kube config
return nil
}
func (p *cloudstackProvider) BootstrapClusterOpts(clusterSpec *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) {
endpoints := []string{}
for _, az := range clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones {
endpoints = append(endpoints, az.ManagementApiEndpoint)
}
return common.BootstrapClusterOpts(p.clusterConfig, endpoints...)
}
func (p *cloudstackProvider) Name() string {
return constants.CloudStackProviderName
}
func (p *cloudstackProvider) DatacenterResourceType() string {
return eksaCloudStackDatacenterResourceType
}
func (p *cloudstackProvider) MachineResourceType() string {
return eksaCloudStackMachineResourceType
}
func (p *cloudstackProvider) generateSSHKeysIfNotSet(machineConfigs map[string]*v1alpha1.CloudStackMachineConfig) error {
var generatedKey string
for _, machineConfig := range machineConfigs {
user := machineConfig.Spec.Users[0]
if user.SshAuthorizedKeys[0] == "" {
if generatedKey != "" { // use same key already generated
user.SshAuthorizedKeys[0] = generatedKey
} else { // generate new key
logger.Info("Provided sshAuthorizedKey is not set or is empty, auto-generating new key pair...", "cloudstackMachineConfig", machineConfig.Name)
var err error
generatedKey, err = common.GenerateSSHAuthKey(p.writer)
if err != nil {
return err
}
user.SshAuthorizedKeys[0] = generatedKey
}
}
}
return nil
}
func (p *cloudstackProvider) setMachineConfigDefaults(clusterSpec *cluster.Spec) {
for _, mc := range clusterSpec.CloudStackMachineConfigs {
mc.SetUserDefaults()
}
}
func (p *cloudstackProvider) validateManagementApiEndpoint(rawurl string) error {
_, err := url.ParseRequestURI(rawurl)
if err != nil {
return fmt.Errorf("CloudStack managementApiEndpoint is invalid: %v", err)
}
return nil
}
func (p *cloudstackProvider) validateEnv(ctx context.Context) error {
var cloudStackB64EncodedSecret string
var ok bool
if cloudStackB64EncodedSecret, ok = os.LookupEnv(decoder.EksacloudStackCloudConfigB64SecretKey); ok && len(cloudStackB64EncodedSecret) > 0 {
if err := os.Setenv(decoder.CloudStackCloudConfigB64SecretKey, cloudStackB64EncodedSecret); err != nil {
return fmt.Errorf("unable to set %s: %v", decoder.CloudStackCloudConfigB64SecretKey, err)
}
} else {
return fmt.Errorf("%s is not set or is empty", decoder.EksacloudStackCloudConfigB64SecretKey)
}
execConfig, err := decoder.ParseCloudStackCredsFromEnv()
if err != nil {
return fmt.Errorf("failed to parse environment variable exec config: %v", err)
}
if len(execConfig.Profiles) <= 0 {
return errors.New("cloudstack instances are not defined")
}
for _, instance := range execConfig.Profiles {
if err := p.validateManagementApiEndpoint(instance.ManagementUrl); err != nil {
return fmt.Errorf("CloudStack instance %s's managementApiEndpoint %s is invalid: %v",
instance.Name, instance.ManagementUrl, err)
}
}
p.execConfig = execConfig
if _, ok := os.LookupEnv(eksaLicense); !ok {
if err := os.Setenv(eksaLicense, ""); err != nil {
return fmt.Errorf("unable to set %s: %v", eksaLicense, err)
}
}
return nil
}
func (p *cloudstackProvider) validateClusterSpec(ctx context.Context, clusterSpec *cluster.Spec) (err error) {
if err := p.validator.ValidateCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter); err != nil {
return err
}
if err := p.validator.ValidateClusterMachineConfigs(ctx, clusterSpec); err != nil {
return err
}
return nil
}
func (p *cloudstackProvider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error {
if err := p.validateEnv(ctx); err != nil {
return fmt.Errorf("validating environment variables: %v", err)
}
if err := v1alpha1.ValidateCloudStackK8sVersion(clusterSpec.Cluster.Spec.KubernetesVersion); err != nil {
return fmt.Errorf("validating K8s version for provider: %v", err)
}
if err := p.validateClusterSpec(ctx, clusterSpec); err != nil {
return fmt.Errorf("validating cluster spec: %v", err)
}
if err := p.validator.ValidateControlPlaneEndpointUniqueness(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host); err != nil {
return fmt.Errorf("validating control plane endpoint uniqueness: %v", err)
}
if err := p.generateSSHKeysIfNotSet(clusterSpec.CloudStackMachineConfigs); err != nil {
return fmt.Errorf("setting up SSH keys: %v", err)
}
if clusterSpec.Cluster.IsManaged() {
for _, mc := range p.MachineConfigs(clusterSpec) {
em, err := p.providerKubectlClient.SearchCloudStackMachineConfig(ctx, mc.GetName(), clusterSpec.ManagementCluster.KubeconfigFile, mc.GetNamespace())
if err != nil {
return err
}
if len(em) > 0 {
return fmt.Errorf("CloudStackMachineConfig %s already exists", mc.GetName())
}
}
existingDatacenter, err := p.providerKubectlClient.SearchCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if len(existingDatacenter) > 0 {
return fmt.Errorf("CloudStackDatacenter %s already exists", clusterSpec.CloudStackDatacenter.Name)
}
}
return nil
}
func (p *cloudstackProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, currentSpec *cluster.Spec) error {
if err := p.validateEnv(ctx); err != nil {
return fmt.Errorf("validating environment variables: %v", err)
}
if err := v1alpha1.ValidateCloudStackK8sVersion(clusterSpec.Cluster.Spec.KubernetesVersion); err != nil {
return fmt.Errorf("validating K8s version for provider: %v", err)
}
p.setMachineConfigDefaults(clusterSpec)
if err := p.validateClusterSpec(ctx, clusterSpec); err != nil {
return fmt.Errorf("validating cluster spec: %v", err)
}
if err := p.validateMachineConfigsNameUniqueness(ctx, cluster, clusterSpec); err != nil {
return fmt.Errorf("validating machine config name uniqueness: %v", err)
}
if err := p.validator.ValidateSecretsUnchanged(ctx, cluster, p.execConfig, p.providerKubectlClient); err != nil {
return fmt.Errorf("validating secrets unchanged: %v", err)
}
return nil
}
func (p *cloudstackProvider) SetupAndValidateDeleteCluster(ctx context.Context, _ *types.Cluster, _ *cluster.Spec) error {
err := p.validateEnv(ctx)
if err != nil {
return fmt.Errorf("validating environment variables: %v", err)
}
return nil
}
func needsNewControlPlaneTemplate(oldSpec, newSpec *cluster.Spec, oldCsmc, newCsmc *v1alpha1.CloudStackMachineConfig, log logr.Logger) bool {
// Another option is to generate MachineTemplates based on the old and new eksa spec,
// remove the name field, and compare them with DeepEqual.
// We plan to move to that approach since it's more flexible for adding/removing fields and easier to validate.
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
return NeedNewMachineTemplate(oldSpec.CloudStackDatacenter, newSpec.CloudStackDatacenter, oldCsmc, newCsmc, log)
}
func NeedsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec, oldCsdc, newCsdc *v1alpha1.CloudStackDatacenterConfig, oldCsmc, newCsmc *v1alpha1.CloudStackMachineConfig, log logr.Logger) bool {
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
if !v1alpha1.WorkerNodeGroupConfigurationSliceTaintsEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) ||
!v1alpha1.WorkerNodeGroupConfigurationsLabelsMapEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) {
return true
}
return NeedNewMachineTemplate(oldCsdc, newCsdc, oldCsmc, newCsmc, log)
}
func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration) bool {
return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) || !v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels)
}
func needsNewEtcdTemplate(oldSpec, newSpec *cluster.Spec, oldCsmc, newCsmc *v1alpha1.CloudStackMachineConfig, log logr.Logger) bool {
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
return NeedNewMachineTemplate(oldSpec.CloudStackDatacenter, newSpec.CloudStackDatacenter, oldCsmc, newCsmc, log)
}
func (p *cloudstackProvider) needsNewMachineTemplate(ctx context.Context, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, csdc *v1alpha1.CloudStackDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (bool, error) {
if oldWorkerNodeGroup, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
newWorkerMachineConfig := newClusterSpec.CloudStackMachineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name]
oldWorkerMachineConfig, err := p.providerKubectlClient.GetEksaCloudStackMachineConfig(ctx, oldWorkerNodeGroup.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return false, err
}
needsNewWorkloadTemplate := NeedsNewWorkloadTemplate(currentSpec, newClusterSpec, csdc, newClusterSpec.CloudStackDatacenter, oldWorkerMachineConfig, newWorkerMachineConfig, p.log)
return needsNewWorkloadTemplate, nil
}
return true, nil
}
func (p *cloudstackProvider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (bool, error) {
if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]
return NeedsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig), nil
}
return true, nil
}
// NeedNewMachineTemplate is used by the EKS-A controller and CLI upgrade workflows to compare generated CSDCs/CSMCs from
// CAPC resources in fetcher.go with those already on the cluster when deciding whether to generate and apply
// new CloudStackMachineTemplates.
func NeedNewMachineTemplate(
oldDatacenterConfig, newDatacenterConfig *v1alpha1.CloudStackDatacenterConfig,
oldMachineConfig, newMachineConfig *v1alpha1.CloudStackMachineConfig,
log logr.Logger,
) bool {
oldAzs := oldDatacenterConfig.Spec.AvailabilityZones
newAzs := newDatacenterConfig.Spec.AvailabilityZones
if !hasSameAvailabilityZones(oldAzs, newAzs) {
log.V(4).Info(
"CloudStackDatacenterConfigs do not match",
"oldAvailabilityZones", oldDatacenterConfig.Spec.AvailabilityZones,
"newAvailabilityZones", newDatacenterConfig.Spec.AvailabilityZones,
)
return true
}
if !oldMachineConfig.Spec.Template.Equal(&newMachineConfig.Spec.Template) {
log.V(4).Info(
"Old and new CloudStackMachineConfig Templates do not match",
"machineConfig", oldMachineConfig.Name,
"oldTemplate", oldMachineConfig.Spec.Template,
"newTemplate", newMachineConfig.Spec.Template,
)
return true
}
if !oldMachineConfig.Spec.ComputeOffering.Equal(&newMachineConfig.Spec.ComputeOffering) {
log.V(4).Info(
"Old and new CloudStackMachineConfig Compute Offerings do not match",
"machineConfig", oldMachineConfig.Name,
"oldComputeOffering", oldMachineConfig.Spec.ComputeOffering,
"newComputeOffering", newMachineConfig.Spec.ComputeOffering,
)
return true
}
if !oldMachineConfig.Spec.DiskOffering.Equal(newMachineConfig.Spec.DiskOffering) {
log.V(4).Info(
"Old and new CloudStackMachineConfig DiskOfferings do not match",
"machineConfig", oldMachineConfig.Name,
"oldDiskOffering", oldMachineConfig.Spec.DiskOffering,
"newDiskOffering", newMachineConfig.Spec.DiskOffering,
)
return true
}
if !isEqualMap(oldMachineConfig.Spec.UserCustomDetails, newMachineConfig.Spec.UserCustomDetails) {
log.V(4).Info(
"Old and new CloudStackMachineConfig UserCustomDetails do not match",
"machineConfig", oldMachineConfig.Name,
"oldUserCustomDetails", oldMachineConfig.Spec.UserCustomDetails,
"newUserCustomDetails", newMachineConfig.Spec.UserCustomDetails,
)
return true
}
if !isEqualMap(oldMachineConfig.Spec.Symlinks, newMachineConfig.Spec.Symlinks) {
log.V(4).Info(
"Old and new CloudStackMachineConfig Symlinks do not match",
"machineConfig", oldMachineConfig.Name,
"oldSymlinks", oldMachineConfig.Spec.Symlinks,
"newSymlinks", newMachineConfig.Spec.Symlinks,
)
return true
}
return false
}
func isEqualMap[K, V comparable](a, b map[K]V) bool {
if len(a) != len(b) {
return false
}
// Ensure all keys are present in b, and a's values equal b's values.
for k, av := range a {
if bv, ok := b[k]; !ok || av != bv {
return false
}
}
return true
}
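// exampleSymlinkComparison is an illustrative sketch (not part of the original source) showing that
// isEqualMap treats maps with the same key/value pairs as equal regardless of declaration order,
// which is how Symlinks and UserCustomDetails changes are detected above. The paths are assumptions.
func exampleSymlinkComparison() bool {
    oldLinks := map[string]string{"/var/log": "/data/var/log", "/etc/hosts": "/data/etc/hosts"}
    newLinks := map[string]string{"/etc/hosts": "/data/etc/hosts", "/var/log": "/data/var/log"}
    // Same pairs in a different order compare equal, so no new machine template is needed.
    return isEqualMap(oldLinks, newLinks)
}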
func hasSameAvailabilityZones(old, nw []v1alpha1.CloudStackAvailabilityZone) bool {
if len(old) != len(nw) {
return false
}
oldAzs := map[string]v1alpha1.CloudStackAvailabilityZone{}
for _, az := range old {
oldAzs[az.Name] = az
}
// Equality of availability zones doesn't take into consideration the availability zone's
// ManagementApiEndpoint. It's unclear why this is the case. The ManagementApiEndpoint seems
// to only be used for proxy configuration.
equal := func(old, nw v1alpha1.CloudStackAvailabilityZone) bool {
return old.Zone.Equal(&nw.Zone) &&
old.Name == nw.Name &&
old.CredentialsRef == nw.CredentialsRef &&
old.Account == nw.Account &&
old.Domain == nw.Domain
}
for _, newAz := range nw {
oldAz, found := oldAzs[newAz.Name]
if !found || !equal(oldAz, newAz) {
return false
}
}
return true
}
func (p *cloudstackProvider) generateCAPISpecForCreate(ctx context.Context, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
clusterName := clusterSpec.Cluster.Name
cpOpt := func(values map[string]interface{}) {
values[cpTemplateNameKey] = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
values[etcdTemplateNameKey] = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now)
}
controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, cpOpt)
if err != nil {
return nil, nil, err
}
workloadTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
kubeadmconfigTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workloadTemplateNames[workerNodeGroupConfiguration.Name] = common.WorkerMachineTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = common.KubeadmConfigTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
}
workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(clusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
if err != nil {
return nil, nil, err
}
return controlPlaneSpec, workersSpec, nil
}
func (p *cloudstackProvider) getControlPlaneNameForCAPISpecUpgrade(ctx context.Context, oldCluster *v1alpha1.Cluster, currentSpec, newClusterSpec *cluster.Spec, bootstrapCluster, workloadCluster *types.Cluster, csdc *v1alpha1.CloudStackDatacenterConfig, clusterName string) (string, error) {
controlPlaneMachineConfig := newClusterSpec.CloudStackMachineConfigs[newClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
controlPlaneVmc, err := p.providerKubectlClient.GetEksaCloudStackMachineConfig(ctx, oldCluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return "", err
}
if !needsNewControlPlaneTemplate(currentSpec, newClusterSpec, controlPlaneVmc, controlPlaneMachineConfig, p.log) {
cp, err := p.providerKubectlClient.GetKubeadmControlPlane(ctx, workloadCluster, oldCluster.Name, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return "", err
}
return cp.Spec.MachineTemplate.InfrastructureRef.Name, nil
} else {
return common.CPMachineTemplateName(clusterName, p.templateBuilder.now), nil
}
}
func (p *cloudstackProvider) getWorkloadTemplateSpecForCAPISpecUpgrade(ctx context.Context, currentSpec, newClusterSpec *cluster.Spec, bootstrapCluster, workloadCluster *types.Cluster, csdc *v1alpha1.CloudStackDatacenterConfig, clusterName string) ([]byte, error) {
var kubeadmconfigTemplateName, workloadTemplateName string
previousWorkerNodeGroupConfigs := cluster.BuildMapForWorkerNodeGroupsByName(currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations)
workloadTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
kubeadmconfigTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
needsNewWorkloadTemplate, err := p.needsNewMachineTemplate(ctx, workloadCluster, currentSpec, newClusterSpec, workerNodeGroupConfiguration, csdc, previousWorkerNodeGroupConfigs)
if err != nil {
return nil, err
}
needsNewKubeadmConfigTemplate, err := p.needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs)
if err != nil {
return nil, err
}
if !needsNewKubeadmConfigTemplate {
mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, err
}
kubeadmconfigTemplateName = md.Spec.Template.Spec.Bootstrap.ConfigRef.Name
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
} else {
kubeadmconfigTemplateName = common.KubeadmConfigTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
}
if !needsNewWorkloadTemplate {
mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, err
}
workloadTemplateName = md.Spec.Template.Spec.InfrastructureRef.Name
workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
} else {
workloadTemplateName = common.WorkerMachineTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
}
}
return p.templateBuilder.GenerateCAPISpecWorkers(newClusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
}
func (p *cloudstackProvider) getEtcdTemplateNameForCAPISpecUpgrade(ctx context.Context, oldCluster *v1alpha1.Cluster, currentSpec, newClusterSpec *cluster.Spec, bootstrapCluster, workloadCluster *types.Cluster, csdc *v1alpha1.CloudStackDatacenterConfig, clusterName string) (string, error) {
etcdMachineConfig := newClusterSpec.CloudStackMachineConfigs[newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
etcdMachineVmc, err := p.providerKubectlClient.GetEksaCloudStackMachineConfig(ctx, oldCluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return "", err
}
needsNewEtcdTemplate := needsNewEtcdTemplate(currentSpec, newClusterSpec, etcdMachineVmc, etcdMachineConfig, p.log)
if !needsNewEtcdTemplate {
etcdadmCluster, err := p.providerKubectlClient.GetEtcdadmCluster(ctx, workloadCluster, clusterName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return "", err
}
return etcdadmCluster.Spec.InfrastructureTemplate.Name, nil
} else {
		/* During a cluster upgrade, etcd machines need to be upgraded first, so that etcd machines with the new spec are created and can be used by control plane machines
		as etcd endpoints. The KCP rollout should not start until then. As a temporary solution in the absence of static etcd endpoints, we annotate the etcd cluster as "upgrading"
		so that KCP checks this annotation and does not proceed while the etcd cluster is upgrading. The etcdadm controller removes this annotation once the etcd upgrade is complete.
		*/
err = p.providerKubectlClient.UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", clusterName),
map[string]string{etcdv1beta1.UpgradeInProgressAnnotation: "true"},
executables.WithCluster(bootstrapCluster),
executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return "", err
}
return common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now), nil
}
}
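// generateCAPISpecForUpgrade resolves the control plane, worker and (optional) external etcd template names against the
// cluster being upgraded and renders the CAPI control plane and workers manifests for the new cluster spec.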
func (p *cloudstackProvider) generateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
clusterName := newClusterSpec.Cluster.Name
var controlPlaneTemplateName, etcdTemplateName string
c, err := p.providerKubectlClient.GetEksaCluster(ctx, workloadCluster, clusterName)
if err != nil {
return nil, nil, err
}
csdc, err := p.providerKubectlClient.GetEksaCloudStackDatacenterConfig(ctx, newClusterSpec.CloudStackDatacenter.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return nil, nil, err
}
csdc.SetDefaults()
currentSpec.CloudStackDatacenter = csdc
controlPlaneTemplateName, err = p.getControlPlaneNameForCAPISpecUpgrade(ctx, c, currentSpec, newClusterSpec, bootstrapCluster, workloadCluster, csdc, clusterName)
if err != nil {
return nil, nil, err
}
workersSpec, err = p.getWorkloadTemplateSpecForCAPISpecUpgrade(ctx, currentSpec, newClusterSpec, bootstrapCluster, workloadCluster, csdc, clusterName)
if err != nil {
return nil, nil, err
}
if newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdTemplateName, err = p.getEtcdTemplateNameForCAPISpecUpgrade(ctx, c, currentSpec, newClusterSpec, bootstrapCluster, workloadCluster, csdc, clusterName)
if err != nil {
return nil, nil, err
}
}
cpOpt := func(values map[string]interface{}) {
values[cpTemplateNameKey] = controlPlaneTemplateName
values[etcdTemplateNameKey] = etcdTemplateName
}
controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(newClusterSpec, cpOpt)
if err != nil {
return nil, nil, err
}
return controlPlaneSpec, workersSpec, nil
}
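// GenerateCAPISpecForUpgrade generates the CAPI control plane and workers manifests used to upgrade an existing cluster.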
func (p *cloudstackProvider) GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
controlPlaneSpec, workersSpec, err = p.generateCAPISpecForUpgrade(ctx, bootstrapCluster, workloadCluster, currentSpec, clusterSpec)
if err != nil {
return nil, nil, fmt.Errorf("error generating cluster api spec contents: %v", err)
}
return controlPlaneSpec, workersSpec, nil
}
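// GenerateCAPISpecForCreate generates the CAPI control plane and workers manifests used to create a new cluster.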
func (p *cloudstackProvider) GenerateCAPISpecForCreate(ctx context.Context, _ *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
controlPlaneSpec, workersSpec, err = p.generateCAPISpecForCreate(ctx, clusterSpec)
if err != nil {
return nil, nil, fmt.Errorf("generating cluster api Spec contents: %v", err)
}
return controlPlaneSpec, workersSpec, nil
}
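// machineConfigsSpecChanged reports whether any CloudStackMachineConfig referenced by the existing cluster is missing
// from, or differs from, the new cluster spec.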
func (p *cloudstackProvider) machineConfigsSpecChanged(ctx context.Context, cc *v1alpha1.Cluster, cluster *types.Cluster, newClusterSpec *cluster.Spec) (bool, error) {
for _, oldMcRef := range cc.MachineConfigRefs() {
existingCsmc, err := p.providerKubectlClient.GetEksaCloudStackMachineConfig(ctx, oldMcRef.Name, cluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return false, err
}
csmc, ok := newClusterSpec.CloudStackMachineConfigs[oldMcRef.Name]
if !ok {
p.log.V(3).Info(fmt.Sprintf("Old machine config spec %s not found in the existing spec", oldMcRef.Name))
return true, nil
}
if !existingCsmc.Spec.Equal(&csmc.Spec) {
p.log.V(3).Info(fmt.Sprintf("New machine config spec %s is different from the existing spec", oldMcRef.Name))
return true, nil
}
}
return false, nil
}
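// CleanupProviderInfrastructure satisfies the Provider interface; there is no provider infrastructure to clean up for CloudStack.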
func (p *cloudstackProvider) CleanupProviderInfrastructure(_ context.Context) error {
return nil
}
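// BootstrapSetup satisfies the Provider interface; no bootstrap setup is needed for CloudStack.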
func (p *cloudstackProvider) BootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
// Nothing to do
return nil
}
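// Version returns the CloudStack provider version from the cluster spec's versions bundle.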
func (p *cloudstackProvider) Version(clusterSpec *cluster.Spec) string {
return clusterSpec.VersionsBundle.CloudStack.Version
}
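// EnvMap returns the environment variables required by the CloudStack provider, returning an error if any of them is unset or empty.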
func (p *cloudstackProvider) EnvMap(_ *cluster.Spec) (map[string]string, error) {
envMap := make(map[string]string)
for _, key := range requiredEnvs {
if env, ok := os.LookupEnv(key); ok && len(env) > 0 {
envMap[key] = env
} else {
return envMap, fmt.Errorf("warning required env not set %s", key)
}
}
return envMap, nil
}
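// GetDeployments returns the CAPC provider deployments, keyed by namespace.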
func (p *cloudstackProvider) GetDeployments() map[string][]string {
return map[string][]string{"capc-system": {"capc-controller-manager"}}
}
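// GetInfrastructureBundle returns the CloudStack infrastructure bundle (components and metadata manifests) for the
// provider version in the cluster spec.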
func (p *cloudstackProvider) GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle {
bundle := clusterSpec.VersionsBundle
folderName := fmt.Sprintf("infrastructure-cloudstack/%s/", bundle.CloudStack.Version)
infraBundle := types.InfrastructureBundle{
FolderName: folderName,
Manifests: []releasev1alpha1.Manifest{
bundle.CloudStack.Components,
bundle.CloudStack.Metadata,
},
}
return &infraBundle
}
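// DatacenterConfig returns the CloudStackDatacenterConfig from the cluster spec.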
func (p *cloudstackProvider) DatacenterConfig(clusterSpec *cluster.Spec) providers.DatacenterConfig {
return clusterSpec.CloudStackDatacenter
}
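// MachineConfigs annotates the control plane and, when configured, external etcd machine configs, marks worker node
// group machine configs as managed for workload clusters, and returns all CloudStackMachineConfigs in the spec.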
func (p *cloudstackProvider) MachineConfigs(spec *cluster.Spec) []providers.MachineConfig {
annotateMachineConfig(
spec,
spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name,
spec.Cluster.ControlPlaneAnnotation(),
"true",
)
if p.clusterConfig.Spec.ExternalEtcdConfiguration != nil {
annotateMachineConfig(
spec,
spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name,
spec.Cluster.EtcdAnnotation(),
"true",
)
}
for _, workerNodeGroupConfiguration := range p.clusterConfig.Spec.WorkerNodeGroupConfigurations {
setMachineConfigManagedBy(spec, workerNodeGroupConfiguration.MachineGroupRef.Name)
}
machineConfigs := make([]providers.MachineConfig, 0, len(spec.CloudStackMachineConfigs))
for _, m := range spec.CloudStackMachineConfigs {
machineConfigs = append(machineConfigs, m)
}
return machineConfigs
}
func annotateMachineConfig(spec *cluster.Spec, machineConfigName, annotationKey, annotationValue string) {
machineConfig := spec.CloudStackMachineConfigs[machineConfigName]
if machineConfig.Annotations == nil {
machineConfig.Annotations = make(map[string]string, 1)
}
machineConfig.Annotations[annotationKey] = annotationValue
setMachineConfigManagedBy(spec, machineConfigName)
}
func setMachineConfigManagedBy(spec *cluster.Spec, machineConfigName string) {
machineConfig := spec.CloudStackMachineConfigs[machineConfigName]
if spec.Cluster.IsManaged() {
machineConfig.SetManagement(spec.Cluster.ManagedBy())
}
}
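// UpgradeNeeded reports whether the CloudStack datacenter config or any referenced machine config differs between the
// objects on the cluster and the new cluster spec.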
func (p *cloudstackProvider) UpgradeNeeded(ctx context.Context, newSpec, currentSpec *cluster.Spec, cluster *types.Cluster) (bool, error) {
cc := currentSpec.Cluster
existingCsdc, err := p.providerKubectlClient.GetEksaCloudStackDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, newSpec.Cluster.Namespace)
if err != nil {
return false, err
}
existingCsdc.SetDefaults()
currentSpec.CloudStackDatacenter = existingCsdc
if !existingCsdc.Spec.Equal(&newSpec.CloudStackDatacenter.Spec) {
p.log.V(3).Info("New provider spec is different from the new spec")
return true, nil
}
machineConfigsSpecChanged, err := p.machineConfigsSpecChanged(ctx, cc, cluster, newSpec)
if err != nil {
return false, err
}
return machineConfigsSpecChanged, nil
}
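// DeleteResources deletes the EKS-A CloudStack machine config and datacenter config objects from the management cluster.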
func (p *cloudstackProvider) DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error {
for _, mc := range clusterSpec.CloudStackMachineConfigs {
if err := p.providerKubectlClient.DeleteEksaCloudStackMachineConfig(ctx, mc.Name, clusterSpec.ManagementCluster.KubeconfigFile, mc.Namespace); err != nil {
return err
}
}
return p.providerKubectlClient.DeleteEksaCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.CloudStackDatacenter.Namespace)
}
func (p *cloudstackProvider) PostClusterDeleteValidate(_ context.Context, _ *types.Cluster) error {
// No validations
return nil
}
func (p *cloudstackProvider) PostMoveManagementToBootstrap(_ context.Context, _ *types.Cluster) error {
// NOOP
return nil
}
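// validateMachineConfigsNameUniqueness verifies that, when the control plane or external etcd machine group reference
// changes during an upgrade, the newly referenced machine config names do not collide with existing CloudStackMachineConfigs.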
func (p *cloudstackProvider) validateMachineConfigsNameUniqueness(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
prevSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName())
if err != nil {
return err
}
cpMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
if prevSpec.Spec.ControlPlaneConfiguration.MachineGroupRef.Name != cpMachineConfigName {
err := p.validateMachineConfigNameUniqueness(ctx, cpMachineConfigName, cluster, clusterSpec)
if err != nil {
return err
}
}
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil && prevSpec.Spec.ExternalEtcdConfiguration != nil {
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
if prevSpec.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name != etcdMachineConfigName {
err := p.validateMachineConfigNameUniqueness(ctx, etcdMachineConfigName, cluster, clusterSpec)
if err != nil {
return err
}
}
}
return nil
}
func (p *cloudstackProvider) validateMachineConfigNameUniqueness(ctx context.Context, machineConfigName string, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
em, err := p.providerKubectlClient.SearchCloudStackMachineConfig(ctx, machineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace())
if err != nil {
return err
}
if len(em) > 0 {
return fmt.Errorf("machineconfig %s already exists", machineConfigName)
}
return nil
}
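// InstallCustomProviderComponents propagates the CloudStack kube-vip disabled feature flag to the EKS-A controller as
// an environment variable on the cluster referenced by kubeconfigFile.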
func (p *cloudstackProvider) InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error {
kubeVipDisabledString := strconv.FormatBool(features.IsActive(features.CloudStackKubeVipDisabled()))
return p.providerKubectlClient.SetEksaControllerEnvVar(ctx, features.CloudStackKubeVipDisabledEnvVar, kubeVipDisabledString, kubeconfigFile)
}
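// machineDeploymentName returns the CAPI MachineDeployment name for a worker node group,
// e.g. machineDeploymentName("test", "md-0") returns "test-md-0".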
func machineDeploymentName(clusterName, nodeGroupName string) string {
return fmt.Sprintf("%s-%s", clusterName, nodeGroupName)
}
// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *cloudstackProvider) PreCoreComponentsUpgrade(
ctx context.Context,
cluster *types.Cluster,
clusterSpec *cluster.Spec,
) error {
return nil
}
| 993 |
eks-anywhere | aws | Go | package cloudstack
import (
"context"
"embed"
_ "embed"
"encoding/base64"
"errors"
"fmt"
"os"
"path"
"testing"
"time"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
//go:embed testdata
var configFS embed.FS
const (
expectedCloudStackName = "cloudstack"
cloudStackCloudConfigWithInvalidUrl = "W0dsb2JhbF0KdmVyaWZ5LXNzbCA9IGZhbHNlCmFwaS1rZXkgPSB0ZXN0LWtleTEKc2VjcmV0LWtleSA9IHRlc3Qtc2VjcmV0MQphcGktdXJsID0geHh4Cg=="
validCloudStackCloudConfig = "W0dsb2JhbF0KYXBpLWtleSAgICA9IGZha2UtYXBpLWtleQpzZWNyZXQta2V5ID0gZmFrZS1zZWNy\nZXQta2V5CmFwaS11cmwgICAgPSBodHRwOi8vMTAuMTEuMC4yOjgwODAvY2xpZW50L2FwaQoKW0ds\nb2JhbDJdCmFwaS1rZXkgICAgPSBmYWtlLWFwaS1rZXkKc2VjcmV0LWtleSA9IGZha2Utc2VjcmV0\nLWtleQphcGktdXJsICAgID0gaHR0cDovLzEwLjEyLjAuMjo4MDgwL2NsaWVudC9hcGkKCg=="
defaultCloudStackCloudConfigPath = "testdata/cloudstack_config_valid.ini"
)
var notFoundError = apierrors.NewNotFound(schema.GroupResource{}, "")
var expectedSecret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: v1.SchemeGroupVersion.Version,
},
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: "global",
},
Data: map[string][]byte{
"api-url": []byte("http://127.16.0.1:8080/client/api"),
"api-key": []byte("test-key1"),
"secret-key": []byte("test-secret1"),
"verify-ssl": []byte("false"),
},
}
func givenClusterConfig(t *testing.T, fileName string) *v1alpha1.Cluster {
return givenClusterSpec(t, fileName).Cluster
}
func givenClusterSpec(t *testing.T, fileName string) *cluster.Spec {
return test.NewFullClusterSpec(t, path.Join(testDataDir, fileName))
}
// TODO: Validate against validator operations instead of using wildcard, now that it's mocked. https://github.com/aws/eks-anywhere/issues/3944
func givenWildcardValidator(mockCtrl *gomock.Controller, clusterSpec *cluster.Spec) *MockProviderValidator {
validator := NewMockProviderValidator(mockCtrl)
validator.EXPECT().ValidateClusterMachineConfigs(gomock.Any(), gomock.Any()).SetArg(1, *clusterSpec).AnyTimes()
validator.EXPECT().ValidateCloudStackDatacenterConfig(gomock.Any(), clusterSpec.CloudStackDatacenter).AnyTimes()
validator.EXPECT().ValidateControlPlaneEndpointUniqueness(gomock.Any()).AnyTimes()
return validator
}
func fillClusterSpecWithClusterConfig(spec *cluster.Spec, clusterConfig *v1alpha1.Cluster) {
spec.Cluster = clusterConfig
}
func givenDatacenterConfig(t *testing.T, fileName string) *v1alpha1.CloudStackDatacenterConfig {
datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, fileName))
if err != nil {
t.Fatalf("unable to get datacenter config from file: %v", err)
}
datacenterConfig.SetDefaults()
return datacenterConfig
}
func givenMachineConfigs(t *testing.T, fileName string) map[string]*v1alpha1.CloudStackMachineConfig {
machineConfigs, err := v1alpha1.GetCloudStackMachineConfigs(path.Join(testDataDir, fileName))
if err != nil {
t.Fatalf("unable to get machine configs from file: %v", err)
}
return machineConfigs
}
func givenProvider(t *testing.T) *cloudstackProvider {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterConfig := clusterSpec.Cluster
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(
t,
datacenterConfig,
clusterConfig,
nil,
validator,
)
if provider == nil {
t.Fatalf("provider object is nil")
}
return provider
}
func workerNodeGroup1MachineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-0-template-1234567890000",
},
},
},
},
},
}
}
func workerNodeGroup2MachineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-1-template-1234567890000",
},
},
},
},
},
}
}
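// saveContext reads the cloud-config file at configPath and exposes it, base64 encoded, through the environment
// variables consumed by the CloudStack secret decoder for the duration of the test.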
func saveContext(t *testing.T, configPath string) {
cloudStackCloudConfig, err := configFS.ReadFile(configPath)
if err != nil {
t.Fatalf("Failed to read cloudstack cloud-config file from %s: %v", configPath, err)
}
t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, base64.StdEncoding.EncodeToString(cloudStackCloudConfig))
t.Setenv(decoder.CloudStackCloudConfigB64SecretKey, os.Getenv(decoder.EksacloudStackCloudConfigB64SecretKey))
}
func setupContext(t *testing.T) {
saveContext(t, defaultCloudStackCloudConfigPath)
}
type providerTest struct {
*WithT
t *testing.T
ctx context.Context
managementCluster, workloadCluster *types.Cluster
provider *cloudstackProvider
cluster *v1alpha1.Cluster
clusterSpec *cluster.Spec
datacenterConfig *v1alpha1.CloudStackDatacenterConfig
machineConfigs map[string]*v1alpha1.CloudStackMachineConfig
kubectl *mocks.MockProviderKubectlClient
validator *MockProviderValidator
}
func newProviderTest(t *testing.T) *providerTest {
setupContext(t)
ctrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(ctrl)
spec := givenClusterSpec(t, testClusterConfigMainFilename)
p := &providerTest{
t: t,
WithT: NewWithT(t),
ctx: context.Background(),
managementCluster: &types.Cluster{
Name: "m-cluster",
KubeconfigFile: "kubeconfig-m.kubeconfig",
},
workloadCluster: &types.Cluster{
Name: "test",
KubeconfigFile: "kubeconfig-w.kubeconfig",
},
cluster: spec.Cluster,
clusterSpec: spec,
datacenterConfig: spec.CloudStackDatacenter,
machineConfigs: spec.CloudStackMachineConfigs,
kubectl: kubectl,
validator: givenWildcardValidator(ctrl, spec),
}
p.buildNewProvider()
return p
}
func (tt *providerTest) buildNewProvider() {
tt.provider = newProvider(
tt.t,
tt.clusterSpec.CloudStackDatacenter,
tt.clusterSpec.Cluster,
tt.kubectl,
tt.validator,
)
}
func TestNewProvider(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterConfig := clusterSpec.Cluster
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(
t,
datacenterConfig,
clusterConfig,
kubectl,
validator,
)
if provider == nil {
t.Fatalf("provider object is nil")
}
}
func newProviderWithKubectl(t *testing.T, datacenterConfig *v1alpha1.CloudStackDatacenterConfig, clusterConfig *v1alpha1.Cluster, kubectl ProviderKubectlClient, validator ProviderValidator) *cloudstackProvider {
return newProvider(
t,
datacenterConfig,
clusterConfig,
kubectl,
validator,
)
}
func newProvider(t *testing.T, datacenterConfig *v1alpha1.CloudStackDatacenterConfig, clusterConfig *v1alpha1.Cluster, kubectl ProviderKubectlClient, validator ProviderValidator) *cloudstackProvider {
_, writer := test.NewWriter(t)
return NewProvider(datacenterConfig, clusterConfig, kubectl, validator, writer, test.FakeNow, test.NewNullLogger())
}
func TestProviderGenerateCAPISpecForCreate(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantCPFile string
wantMDFile string
}{
{
testName: "main",
clusterconfigFile: testClusterConfigMainFilename,
wantCPFile: "testdata/expected_results_main_cp.yaml",
wantMDFile: "testdata/expected_results_main_md.yaml",
},
{
testName: "main with rollout strategies",
clusterconfigFile: "cluster_main_with_rollout_strategy.yaml",
wantCPFile: "testdata/expected_results_main_rollout_strategy_cp.yaml",
wantMDFile: "testdata/expected_results_main_rollout_strategy_md.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
func TestProviderGenerateCAPISpecForCreateWithAutoscalingConfiguration(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
wng := &clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0]
ca := &v1alpha1.AutoScalingConfiguration{
MaxCount: 5,
MinCount: 3,
}
wng.AutoScalingConfiguration = ca
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_autoscaling_md.yaml")
}
func TestProviderSetupAndValidateCreateClusterFailureOnInvalidUrl(t *testing.T) {
tt := NewWithT(t)
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, cloudStackCloudConfigWithInvalidUrl)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
tt.Expect(err.Error()).To(Equal("validating environment variables: CloudStack instance global's managementApiEndpoint xxx is invalid: CloudStack managementApiEndpoint is invalid: #{err}"))
}
func TestProviderCreateOrUpgradeClusterK8s125(t *testing.T) {
tt := NewWithT(t)
setupContext(t)
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.Cluster.Spec.KubernetesVersion = "1.25"
provider := newProviderWithKubectl(t, nil, clusterSpec.Cluster, nil, nil)
if provider == nil {
t.Fatalf("provider object is nil")
}
t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, validCloudStackCloudConfig)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
tt.Expect(err.Error()).To(Equal("validating K8s version for provider: cloudstack provider does not support K8s version > 1.24"))
err = provider.SetupAndValidateUpgradeCluster(ctx, nil, clusterSpec, nil)
tt.Expect(err.Error()).To(Equal("validating K8s version for provider: cloudstack provider does not support K8s version > 1.24"))
clusterSpec.Cluster.Spec.KubernetesVersion = "abcd"
err = provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
tt.Expect(err.Error()).To(Equal("validating K8s version for provider: converting kubeVersion abcd to semver invalid major version in semver abcd.0: strconv.ParseUint: parsing \"\": invalid syntax"))
}
func TestProviderSetupAndValidateUpgradeClusterFailureOnInvalidUrl(t *testing.T) {
tt := NewWithT(t)
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, cloudStackCloudConfigWithInvalidUrl)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
tt.Expect(err.Error()).To(Equal("validating environment variables: CloudStack instance global's managementApiEndpoint xxx is invalid: CloudStack managementApiEndpoint is invalid: #{err}"))
}
func TestProviderSetupAndValidateDeleteClusterFailureOnInvalidUrl(t *testing.T) {
tt := NewWithT(t)
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, cloudStackCloudConfigWithInvalidUrl)
err := provider.SetupAndValidateDeleteCluster(ctx, cluster, nil)
tt.Expect(err.Error()).To(Equal("validating environment variables: CloudStack instance global's managementApiEndpoint xxx is invalid: CloudStack managementApiEndpoint is invalid: #{err}"))
}
func TestProviderSetupAndValidateUpgradeClusterFailureOnGetSecretFailure(t *testing.T) {
tt := NewWithT(t)
clusterSpecManifest := "cluster_main.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
secretFailureMsg := "getting secret for profile global: test-error"
validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewBadRequest(secretFailureMsg))
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
tt.Expect(err.Error()).To(Equal("validating secrets unchanged: getting secret for profile global: test-error"))
}
func TestProviderSetupAndValidateUpgradeClusterSuccessOnSecretNotFound(t *testing.T) {
tt := NewWithT(t)
clusterSpecManifest := "cluster_main.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
tt.Expect(err).To(BeNil())
}
func TestProviderSetupAndValidateUpgradeClusterFailureOnSecretChanged(t *testing.T) {
tt := NewWithT(t)
clusterSpecManifest := "cluster_main.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
modifiedSecret := expectedSecret.DeepCopy()
modifiedSecret.Data["api-key"] = []byte("updated-api-key")
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
changedSecretMsg := "profile global is different from secret"
	validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New(changedSecretMsg))
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
tt.Expect(err).NotTo(BeNil())
}
func TestProviderGenerateCAPISpecForCreateWithAffinity(t *testing.T) {
clusterSpecManifest := "cluster_affinity.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_affinity_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_affinity_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithZoneIdAndNetworkId(t *testing.T) {
clusterSpecManifest := "cluster_main.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
datacenterConfig.Spec.AvailabilityZones[0].Zone = v1alpha1.CloudStackZone{
Id: "zoneId",
Network: v1alpha1.CloudStackResourceIdentifier{
Id: "networkId",
},
}
clusterSpec.CloudStackDatacenter = datacenterConfig
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_resourceids_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithMirrorConfig(t *testing.T) {
clusterSpecManifest := "cluster_mirror_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithMirrorAndCertConfig(t *testing.T) {
clusterSpecManifest := "cluster_mirror_with_cert_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_with_cert_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_with_cert_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithMirrorConfigInsecureSkipVerify(t *testing.T) {
clusterSpecManifest := "cluster_mirror_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = test.RegistryMirrorInsecureSkipVerifyEnabled()
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_with_insecure_skip_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_with_insecure_skip_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithMirrorConfigInsecureSkipVerifyAndCert(t *testing.T) {
clusterSpecManifest := "cluster_mirror_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert()
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_with_insecure_skip_and_cert_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_with_insecure_skip_and_cert_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithProxyConfig(t *testing.T) {
clusterSpecManifest := "cluster_minimal_proxy.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_minimal_proxy_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_minimal_proxy_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithMultipleWorkerNodeGroups(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, "cluster_main_multiple_worker_node_groups.yaml")
datacenterConfig := givenDatacenterConfig(t, "cluster_main_multiple_worker_node_groups.yaml")
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
_, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_multiple_worker_node_groups.yaml")
}
func TestUpdateKubeConfig(t *testing.T) {
provider := givenProvider(t)
content := []byte{}
err := provider.UpdateKubeConfig(&content, "clusterName")
if err != nil {
t.Fatalf("failed UpdateKubeConfig: %v", err)
}
}
func TestBootstrapClusterOpts(t *testing.T) {
clusterSpecManifest := "cluster_minimal_proxy.yaml"
provider := givenProvider(t)
provider.clusterConfig = givenClusterConfig(t, clusterSpecManifest)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
bootstrapClusterOps, err := provider.BootstrapClusterOpts(clusterSpec)
if err != nil {
t.Fatalf("failed BootstrapClusterOpts: %v", err)
}
if bootstrapClusterOps == nil {
t.Fatalf("expected BootstrapClusterOpts")
}
}
func TestName(t *testing.T) {
provider := givenProvider(t)
if provider.Name() != expectedCloudStackName {
t.Fatalf("unexpected Name %s!=%s", provider.Name(), expectedCloudStackName)
}
}
func TestSetupAndValidateCreateCluster(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateCreateWorkloadClusterSuccess(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
setupContext(t)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
newMachineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
provider.validator = givenWildcardValidator(mockCtrl, clusterSpec)
for _, config := range newMachineConfigs {
kubectl.EXPECT().SearchCloudStackMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.CloudStackMachineConfig{}, nil)
}
kubectl.EXPECT().SearchCloudStackDatacenterConfig(context.TODO(), datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.CloudStackDatacenterConfig{}, nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfMachineExists(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
setupContext(t)
newMachineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
provider.validator = givenWildcardValidator(mockCtrl, clusterSpec)
idx := 0
var existingMachine string
for _, config := range newMachineConfigs {
if idx == 0 {
kubectl.EXPECT().SearchCloudStackMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.CloudStackMachineConfig{config}, nil)
existingMachine = config.Name
} else {
kubectl.EXPECT().SearchCloudStackMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.CloudStackMachineConfig{}, nil).MaxTimes(1)
}
idx++
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, fmt.Sprintf("CloudStackMachineConfig %s already exists", existingMachine), err)
}
func TestSetupAndValidateSelfManagedClusterSkipMachineNameValidateSuccess(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
kubectl.EXPECT().SearchCloudStackMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfDatacenterExists(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
setupContext(t)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
newMachineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
provider.validator = givenWildcardValidator(mockCtrl, clusterSpec)
for _, config := range newMachineConfigs {
kubectl.EXPECT().SearchCloudStackMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.CloudStackMachineConfig{}, nil)
}
kubectl.EXPECT().SearchCloudStackDatacenterConfig(context.TODO(), datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.CloudStackDatacenterConfig{datacenterConfig}, nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, fmt.Sprintf("CloudStackDatacenter %s already exists", datacenterConfig.Name), err)
}
func TestSetupAndValidateSelfManagedClusterSkipDatacenterNameValidateSuccess(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
kubectl.EXPECT().SearchCloudStackMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
kubectl.EXPECT().SearchCloudStackDatacenterConfig(context.TODO(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateDeleteCluster(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateDeleteCluster(ctx, nil, nil)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestCleanupProviderInfrastructure(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
setupContext(t)
err := provider.CleanupProviderInfrastructure(ctx)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestVersion(t *testing.T) {
cloudStackProviderVersion := "v4.14.1"
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.VersionsBundle.CloudStack.Version = cloudStackProviderVersion
setupContext(t)
result := provider.Version(clusterSpec)
if result != cloudStackProviderVersion {
t.Fatalf("Unexpected version expected <%s> actual=<%s>", cloudStackProviderVersion, result)
}
}
func TestPreCAPIInstallOnBootstrap(t *testing.T) {
tests := []struct {
testName string
configPath string
expectedSecretsYamlPath string
}{
{
testName: "valid single profile",
configPath: defaultCloudStackCloudConfigPath,
expectedSecretsYamlPath: "testdata/expected_secrets_single.yaml",
},
}
mockCtrl := gomock.NewController(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
for _, test := range tests {
saveContext(t, test.configPath)
expectedSecretsYaml, err := configFS.ReadFile(test.expectedSecretsYamlPath)
if err != nil {
t.Fatalf("Failed to read embed eksd release: %s", err)
}
kubectl.EXPECT().GetSecretFromNamespace(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil, apierrors.NewNotFound(schema.GroupResource{}, ""))
kubectl.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), expectedSecretsYaml)
_ = provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err := provider.PreCAPIInstallOnBootstrap(ctx, cluster, clusterSpec); err != nil {
t.Fatalf("provider.PreCAPIInstallOnBootstrap() err = %v, want err = nil", err)
}
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyCP(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for control plane machine")
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyWorker(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for worker node machine")
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyEtcd(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey did not get generated for etcd machine")
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyAllMachineConfigs(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for control plane machine")
}
if clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for worker node machine")
}
if clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey not generated for etcd machines")
}
if clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] != clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] {
t.Fatalf("sshAuthorizedKey not the same for controlplane and worker machines")
}
if clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] != clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] {
t.Fatalf("sshAuthorizedKey not the same for controlplane and etcd machines")
}
}
func TestGetInfrastructureBundleSuccess(t *testing.T) {
tests := []struct {
testName string
clusterSpec *cluster.Spec
}{
{
testName: "correct Overrides layer",
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.CloudStack = releasev1alpha1.CloudStackBundle{
Version: "v0.1.0",
ClusterAPIController: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.1.0",
},
KubeRbacProxy: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/brancz/kube-rbac-proxy:v0.8.0-25df7d96779e2a305a22c6e3f9425c3465a77244",
},
KubeVip: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774",
},
Metadata: releasev1alpha1.Manifest{
URI: "Metadata.yaml",
},
Components: releasev1alpha1.Manifest{
URI: "Components.yaml",
},
}
}),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
p := givenProvider(t)
infraBundle := p.GetInfrastructureBundle(tt.clusterSpec)
if infraBundle == nil {
t.Fatalf("provider.GetInfrastructureBundle() should have an infrastructure bundle")
}
assert.Equal(t, "infrastructure-cloudstack/v0.1.0/", infraBundle.FolderName, "Incorrect folder name")
assert.Equal(t, len(infraBundle.Manifests), 2, "Wrong number of files in the infrastructure bundle")
wantManifests := []releasev1alpha1.Manifest{
tt.clusterSpec.VersionsBundle.CloudStack.Components,
tt.clusterSpec.VersionsBundle.CloudStack.Metadata,
}
assert.ElementsMatch(t, infraBundle.Manifests, wantManifests, "Incorrect manifests")
})
}
}
func TestGetDatacenterConfig(t *testing.T) {
provider := givenProvider(t)
providerConfig := provider.DatacenterConfig(givenClusterSpec(t, testClusterConfigMainFilename))
if providerConfig.Kind() != "CloudStackDatacenterConfig" {
t.Fatalf("Unexpected error DatacenterConfig: kind field not found: %s", providerConfig.Kind())
}
}
func TestProviderDeleteResources(t *testing.T) {
mockCtrl := gomock.NewController(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.ManagementCluster = &types.Cluster{
KubeconfigFile: "testKubeConfig",
}
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
for _, mc := range machineConfigs {
kubectl.EXPECT().DeleteEksaCloudStackMachineConfig(ctx, mc.Name, clusterSpec.ManagementCluster.KubeconfigFile, mc.Namespace)
}
kubectl.EXPECT().DeleteEksaCloudStackDatacenterConfig(ctx, provider.datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, provider.datacenterConfig.Namespace)
err := provider.DeleteResources(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestChangeDiffNoChange(t *testing.T) {
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
assert.Nil(t, provider.ChangeDiff(clusterSpec, clusterSpec))
}
func TestChangeDiffWithChange(t *testing.T) {
provider := givenProvider(t)
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.CloudStack.Version = "v0.2.0"
})
newClusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.CloudStack.Version = "v0.1.0"
})
wantDiff := &types.ComponentChangeDiff{
ComponentName: "cloudstack",
NewVersion: "v0.1.0",
OldVersion: "v0.2.0",
}
assert.Equal(t, wantDiff, provider.ChangeDiff(clusterSpec, newClusterSpec))
}
func TestProviderGenerateCAPISpecForUpgradeUpdateMachineTemplate(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantCPFile string
wantMDFile string
}{
{
testName: "minimal",
clusterconfigFile: "cluster_minimal.yaml",
wantCPFile: "testdata/expected_results_minimal_cp.yaml",
wantMDFile: "testdata/expected_results_minimal_md.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
cloudstackDatacenter := &v1alpha1.CloudStackDatacenterConfig{
Spec: v1alpha1.CloudStackDatacenterConfigSpec{},
}
cloudstackMachineConfig := &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{
{
Name: "capv",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
},
}
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackDatacenter, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
func TestProviderGenerateCAPISpecForUpgradeIncompleteClusterSpec(t *testing.T) {
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.CloudStackDatacenter = nil
templateBuilder := NewTemplateBuilder(time.Now)
if _, err := templateBuilder.GenerateCAPISpecControlPlane(clusterSpec); err == nil {
t.Fatalf("Expected error for incomplete cluster spec, but no error occurred")
}
}
func TestProviderGenerateCAPISpecForUpgradeUpdateMachineTemplateExternalEtcd(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantCPFile string
wantMDFile string
}{
{
testName: "main",
clusterconfigFile: testClusterConfigMainFilename,
wantCPFile: "testdata/expected_results_main_cp.yaml",
wantMDFile: "testdata/expected_results_main_md.yaml",
},
{
testName: "main_with_taints",
clusterconfigFile: "cluster_main_with_taints.yaml",
wantCPFile: "testdata/expected_results_main_with_taints_cp.yaml",
wantMDFile: "testdata/expected_results_main_with_taints_md.yaml",
},
{
testName: "main with node labels",
clusterconfigFile: "cluster_main_with_node_labels.yaml",
wantCPFile: "testdata/expected_results_main_cp.yaml",
wantMDFile: "testdata/expected_results_main_node_labels_md.yaml",
},
{
testName: "main with rollout strategies",
clusterconfigFile: "cluster_main_with_rollout_strategy.yaml",
wantCPFile: "testdata/expected_results_main_rollout_strategy_cp.yaml",
wantMDFile: "testdata/expected_results_main_rollout_strategy_md.yaml",
},
{
testName: "main with cp node labels",
clusterconfigFile: "cluster_main_with_cp_node_labels.yaml",
wantCPFile: "testdata/expected_results_main_node_labels_cp.yaml",
wantMDFile: "testdata/expected_results_main_md.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
cloudstackDatacenter := &v1alpha1.CloudStackDatacenterConfig{
Spec: v1alpha1.CloudStackDatacenterConfigSpec{},
}
cloudstackMachineConfig := &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{
{
Name: "capv",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
},
}
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackDatacenter, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
kubectl.EXPECT().UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster)))
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
func TestProviderGenerateCAPISpecForUpgradeNotUpdateMachineTemplate(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
oldCP := &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
Name: "test-control-plane-template-original",
},
},
},
}
oldMD := &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
InfrastructureRef: v1.ObjectReference{
Name: "test-md-0-original",
},
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-0-template-original",
},
},
},
},
},
}
etcdadmCluster := &etcdv1.EtcdadmCluster{
Spec: etcdv1.EtcdadmClusterSpec{
InfrastructureTemplate: v1.ObjectReference{
Name: "test-etcd-template-original",
},
},
}
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
machineDeploymentName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
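// Return the existing CAPI objects unchanged so the provider detects no spec differences and reuses the original machine template names.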
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, controlPlaneMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(machineConfigs[controlPlaneMachineConfigName], nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, workerNodeMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(machineConfigs[workerNodeMachineConfigName], nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, etcdMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(machineConfigs[etcdMachineConfigName], nil)
kubectl.EXPECT().GetKubeadmControlPlane(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldCP, nil)
kubectl.EXPECT().GetMachineDeployment(ctx, machineDeploymentName, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldMD, nil).Times(2)
kubectl.EXPECT().GetEtcdadmCluster(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(etcdadmCluster, nil)
cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_no_machinetemplate_update_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_no_machinetemplate_update_md.yaml")
}
func TestProviderGenerateCAPISpecForUpgradeUpdateMachineGroupRefs(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
oldCP := &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
Name: "test-control-plane-template-1234567890000",
},
},
},
}
oldMD := &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
InfrastructureRef: v1.ObjectReference{
Name: "test-md-0-1234567890000",
},
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-0-template-1234567890000",
},
},
},
},
},
}
etcdadmCluster := &etcdv1.EtcdadmCluster{
Spec: etcdv1.EtcdadmClusterSpec{
InfrastructureTemplate: v1.ObjectReference{
Name: "test-etcd-template-1234567890000",
},
},
}
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
machineDeploymentName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
wnMachineConfig := machineConfigs[workerNodeMachineConfigName]
newClusterSpec := clusterSpec.DeepCopy()
newWorkersMachineConfigName := "new-test-wn"
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = newWorkersMachineConfigName
newWorkerMachineConfig := wnMachineConfig.DeepCopy()
newWorkerMachineConfig.Name = newWorkersMachineConfigName
newClusterSpec.CloudStackMachineConfigs[newWorkersMachineConfigName] = newWorkerMachineConfig
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, controlPlaneMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(machineConfigs[controlPlaneMachineConfigName], nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, workerNodeMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(machineConfigs[workerNodeMachineConfigName], nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, etcdMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(machineConfigs[etcdMachineConfigName], nil)
kubectl.EXPECT().GetKubeadmControlPlane(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldCP, nil)
kubectl.EXPECT().GetMachineDeployment(ctx, machineDeploymentName, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldMD, nil).Times(2)
kubectl.EXPECT().GetEtcdadmCluster(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(etcdadmCluster, nil)
provider.templateBuilder.now = test.NewFakeNow
_, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, newClusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md.yaml")
}
func TestProviderGenerateCAPISpecForUpgradeMultipleWorkerNodeGroups(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantMDFile string
}{
{
testName: "adding a worker node group",
clusterconfigFile: "cluster_main_multiple_worker_node_groups.yaml",
wantMDFile: "testdata/expected_results_minimal_add_worker_node_group.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
cloudstackDatacenter := &v1alpha1.CloudStackDatacenterConfig{
Spec: v1alpha1.CloudStackDatacenterConfigSpec{},
}
cloudstackMachineConfig := &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{
{
Name: "capv",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
},
}
newClusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
newConfig := v1alpha1.WorkerNodeGroupConfiguration{Count: ptr.Int(1), MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "CloudStackMachineConfig"}, Name: "md-2"}
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = append(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newConfig)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup2MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackDatacenter, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil).AnyTimes()
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(cloudstackMachineConfig, nil)
kubectl.EXPECT().UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster)))
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
_, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, newClusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
func TestSetupAndValidateUpgradeCluster(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cluster := &types.Cluster{}
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, clusterSpec.CloudStackDatacenter, clusterSpec.Cluster,
kubectl, validator)
setupContext(t)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil)
validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateUpgradeClusterCPSshNotExists(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, clusterSpec.CloudStackDatacenter, clusterSpec.Cluster, kubectl, validator)
clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil)
validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateUpgradeClusterWorkerSshNotExists(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, clusterSpec.CloudStackDatacenter, clusterSpec.Cluster,
kubectl, validator)
clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil)
validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateUpgradeClusterEtcdSshNotExists(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, clusterSpec.CloudStackDatacenter, clusterSpec.Cluster,
kubectl, validator)
clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil)
validator.EXPECT().ValidateSecretsUnchanged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestValidateMachineConfigsNameUniquenessSuccess(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
prevSpec := tt.clusterSpec.DeepCopy()
prevSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "prev-test-cp"
prevSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "prev-test-etcd"
machineConfigs := tt.clusterSpec.CloudStackMachineConfigs
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.Name).Return(prevSpec.Cluster, nil)
for _, config := range machineConfigs {
tt.kubectl.EXPECT().SearchCloudStackMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.CloudStackMachineConfig{}, nil).AnyTimes()
}
err := tt.provider.validateMachineConfigsNameUniqueness(tt.ctx, cluster, tt.clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestValidateMachineConfigsNameUniquenessError(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
prevSpec := tt.clusterSpec.DeepCopy()
prevSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "prev-test-cp"
prevSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "prev-test-etcd"
machineConfigs := tt.clusterSpec.CloudStackMachineConfigs
dummyMachineConfig := &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{Users: []v1alpha1.UserConfiguration{{Name: "capc"}}},
}
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.Name).Return(prevSpec.Cluster, nil)
for _, config := range machineConfigs {
tt.kubectl.EXPECT().SearchCloudStackMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.CloudStackMachineConfig{dummyMachineConfig}, nil).AnyTimes()
}
err := tt.provider.validateMachineConfigsNameUniqueness(tt.ctx, cluster, tt.clusterSpec)
thenErrorExpected(t, fmt.Sprintf("machineconfig %s already exists", tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name), err)
}
func TestClusterUpgradeNeededNoChanges(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cc := givenClusterConfig(t, testClusterConfigMainFilename)
fillClusterSpecWithClusterConfig(clusterSpec, cc)
cluster := &types.Cluster{
KubeconfigFile: "test",
}
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfigsMap := givenMachineConfigs(t, testClusterConfigMainFilename)
for _, value := range machineConfigsMap {
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, value.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(value, nil)
}
provider := newProviderWithKubectl(t, dcConfig, cc, kubectl, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(dcConfig, nil)
specChanged, err := provider.UpgradeNeeded(ctx, clusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if specChanged {
t.Fatalf("expected no spec change to be detected")
}
}
func TestClusterNeedsNewWorkloadTemplateFalse(t *testing.T) {
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cc := givenClusterConfig(t, testClusterConfigMainFilename)
fillClusterSpecWithClusterConfig(clusterSpec, cc)
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfig := givenMachineConfigs(t, testClusterConfigMainFilename)[cc.MachineConfigRefs()[0].Name]
assert.False(t, NeedsNewWorkloadTemplate(clusterSpec, clusterSpec, dcConfig, dcConfig, machineConfig, machineConfig, test.NewNullLogger()), "expected no spec change to be detected")
}
func TestClusterUpgradeNeededDatacenterConfigChanged(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cc := givenClusterConfig(t, testClusterConfigMainFilename)
fillClusterSpecWithClusterConfig(clusterSpec, cc)
cluster := &types.Cluster{
KubeconfigFile: "test",
}
newClusterSpec := clusterSpec.DeepCopy()
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
shinyModifiedDcConfig := dcConfig.DeepCopy()
shinyModifiedDcConfig.Spec.AvailabilityZones[0].ManagementApiEndpoint = "shiny-new-api-endpoint"
newClusterSpec.CloudStackDatacenter = shinyModifiedDcConfig
provider := newProviderWithKubectl(t, nil, cc, kubectl, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(givenDatacenterConfig(t, testClusterConfigMainFilename), nil)
specChanged, err := provider.UpgradeNeeded(ctx, newClusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if !specChanged {
t.Fatalf("expected spec change but none was detected")
}
}
func TestClusterUpgradeNeededMachineConfigsChanged(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cc := givenClusterConfig(t, testClusterConfigMainFilename)
fillClusterSpecWithClusterConfig(clusterSpec, cc)
cluster := &types.Cluster{
KubeconfigFile: "test",
}
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfigsMap := givenMachineConfigs(t, testClusterConfigMainFilename)
modifiedMachineConfig := machineConfigsMap[cc.MachineConfigRefs()[0].Name].DeepCopy()
modifiedMachineConfig.Spec.Affinity = "shiny-new-affinity"
kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(modifiedMachineConfig, nil)
provider := newProviderWithKubectl(t, dcConfig, cc, kubectl, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(dcConfig, nil)
specChanged, err := provider.UpgradeNeeded(ctx, clusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if !specChanged {
t.Fatalf("expected spec change but none was detected")
}
}
func TestClusterUpgradeNeededMachineConfigsChangedDiskOffering(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cc := givenClusterConfig(t, testClusterConfigMainFilename)
fillClusterSpecWithClusterConfig(clusterSpec, cc)
cluster := &types.Cluster{
KubeconfigFile: "test",
}
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
machineConfigsMap := givenMachineConfigs(t, testClusterConfigMainFilename)
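// Stub the machine config lookups so only the "test" machine config comes back with a modified disk offering; that single change should be enough for UpgradeNeeded to report a spec change.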
getEksaCloudStackMachineConfig := kubectl.EXPECT().GetEksaCloudStackMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).AnyTimes()
getEksaCloudStackMachineConfig.DoAndReturn(
func(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackMachineConfig, error) {
if cloudstackMachineConfigName == "test" {
modifiedMachineConfig := machineConfigsMap["test"].DeepCopy()
modifiedMachineConfig.Spec.DiskOffering = (*machineConfigsMap["test"].Spec.DiskOffering).DeepCopy()
modifiedMachineConfig.Spec.DiskOffering.Name = "shiny-new-diskoffering"
return modifiedMachineConfig, nil
}
return machineConfigsMap[cloudstackMachineConfigName], nil
})
provider := newProviderWithKubectl(t, dcConfig, cc, kubectl, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(dcConfig, nil)
specChanged, err := provider.UpgradeNeeded(ctx, clusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if !specChanged {
t.Fatalf("expected spec change but none was detected")
}
}
func TestNeedNewMachineTemplate(t *testing.T) {
for _, tc := range []struct {
Name string
ConfigureDatacenter func(old, nw *v1alpha1.CloudStackDatacenterConfig)
ConfigureMachines func(old, nw *v1alpha1.CloudStackMachineConfig)
Expect bool
}{
{
Name: "Equivalent",
},
{
// We can't retrieve the ManagementApiEndpoint for an availability zone from the
// cloudstackv1.CloudStackCluster resource, so a difference should be ignored.
//
// The criteria for changing this test and the context under which it was written
// are unclear.
Name: "AvailabilityZones_MissingManagementAPIEndpoint",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
nw.Spec.AvailabilityZones[0].ManagementApiEndpoint = ""
},
},
{
Name: "Datacenter_AvailabilityZones_Add",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = append(
[]v1alpha1.CloudStackAvailabilityZone{},
old.Spec.AvailabilityZones...,
)
az := old.Spec.AvailabilityZones[0].DeepCopy()
az.Name = "shinyNewAz"
nw.Spec.AvailabilityZones = append(nw.Spec.AvailabilityZones, *az)
},
Expect: true,
},
{
Name: "Datacenter_AvailabilityZones_Removed",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{}
},
Expect: true,
},
{
Name: "Datacenter_AvailabilityZones_ChangeCredentialsRef",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = append(
[]v1alpha1.CloudStackAvailabilityZone{},
old.Spec.AvailabilityZones...,
)
nw.Spec.AvailabilityZones[0].CredentialsRef = "new_credentials_ref"
},
Expect: true,
},
{
Name: "Datacenter_AvailabilityZones_ChangeZoneName",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = append(
[]v1alpha1.CloudStackAvailabilityZone{},
old.Spec.AvailabilityZones...,
)
nw.Spec.AvailabilityZones[0].Zone.Name = "new_zone_name"
},
Expect: true,
},
{
Name: "Datacenter_AvailabilityZones_ChangeZoneNetwork",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = append(
[]v1alpha1.CloudStackAvailabilityZone{},
old.Spec.AvailabilityZones...,
)
nw.Spec.AvailabilityZones[0].Zone.Network = v1alpha1.CloudStackResourceIdentifier{
Name: "new_name",
}
},
Expect: true,
},
{
Name: "Datacenter_AvailabilityZones_ChangeDomain",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = append(
[]v1alpha1.CloudStackAvailabilityZone{},
old.Spec.AvailabilityZones...,
)
nw.Spec.AvailabilityZones[0].Domain = "shinyNewDomain"
},
Expect: true,
},
{
Name: "Datacenter_AvailabilityZones_ChangeAccount",
ConfigureDatacenter: func(old, nw *v1alpha1.CloudStackDatacenterConfig) {
old.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "name",
CredentialsRef: "credentials_ref",
Zone: v1alpha1.CloudStackZone{
Name: "name",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
},
Domain: "domain",
Account: "account",
ManagementApiEndpoint: "management_api_endpoint",
},
}
nw.Spec.AvailabilityZones = append(
[]v1alpha1.CloudStackAvailabilityZone{},
old.Spec.AvailabilityZones...,
)
nw.Spec.AvailabilityZones[0].Account = "new_account"
},
Expect: true,
},
{
Name: "Machine_Symlinks_Add",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.Symlinks = map[string]string{
"foo": "bar",
}
nw.Spec.Symlinks = map[string]string{
"foo": "bar",
"qux": "baz",
}
},
Expect: true,
},
{
Name: "Machine_Symlinks_Remove",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.Symlinks = map[string]string{
"foo": "bar",
}
nw.Spec.Symlinks = map[string]string{}
},
Expect: true,
},
{
Name: "Machine_Symlinks_Changed",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.Symlinks = map[string]string{
"foo": "bar",
"qux": "baz",
}
nw.Spec.Symlinks = map[string]string{
"foo": "bar_changed",
"qux": "baz_changed",
}
},
Expect: true,
},
{
Name: "Machine_DiskOffering_NameChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "name",
},
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.Name = "name_changed"
},
Expect: true,
},
{
Name: "Machine_DiskOffering_IDChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "id",
},
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.Id = "id_changed"
},
Expect: true,
},
{
Name: "Machine_DiskOffering_SizeChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "id",
},
CustomSize: 1,
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.CustomSize = 2
},
Expect: true,
},
{
Name: "Machine_DiskOffering_MountPathChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "id",
},
MountPath: "mount_path",
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.MountPath = "new_mount_path"
},
Expect: true,
},
{
Name: "Machine_DiskOffering_DeviceChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "id",
},
Device: "device",
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.Device = "new_device_path"
},
Expect: true,
},
{
Name: "Machine_DiskOffering_FilesystemChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "id",
},
Filesystem: "filesystem",
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.Filesystem = "new_filesystem"
},
Expect: true,
},
{
Name: "Machine_DiskOffering_LabelChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "id",
},
Label: "label",
}
nw.Spec.DiskOffering = old.Spec.DiskOffering.DeepCopy()
nw.Spec.DiskOffering.Label = "new_label"
},
Expect: true,
},
{
Name: "Machine_DiskOffering_ToNil",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
MountPath: "test",
Device: "test",
Filesystem: "test",
}
nw.Spec.DiskOffering = nil
},
Expect: true,
},
{
Name: "Machine_DiskOffering_ToZeroValue",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
MountPath: "test",
Device: "test",
Filesystem: "test",
}
nw.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{}
},
Expect: true,
},
{
Name: "Machine_DiskOffering_Nil",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.DiskOffering = nil
nw.Spec.DiskOffering = nil
},
Expect: false,
},
{
Name: "Machine_ComputeOffering_NewID",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{}
nw.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Id: "test",
}
},
Expect: true,
},
{
Name: "Machine_ComputeOffering_NewName",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{}
nw.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "test",
}
},
Expect: true,
},
{
Name: "Machine_ComputeOffering_IDChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Id: "test",
}
nw.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Id: "changed",
}
},
Expect: true,
},
{
Name: "Machine_ComputeOffering_NameChanged",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "test",
}
nw.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "changed",
}
},
Expect: true,
},
{
Name: "Machine_ComputeOffering_ToZeroValue",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Id: "test",
}
nw.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{}
},
Expect: true,
},
{
Name: "Machine_UserCustomDetails_Add",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.UserCustomDetails = map[string]string{}
nw.Spec.UserCustomDetails = map[string]string{
"foo": "bar",
}
},
Expect: true,
},
{
Name: "Machine_UserCustomDetails_Remove",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.UserCustomDetails = map[string]string{
"foo": "bar",
}
nw.Spec.UserCustomDetails = map[string]string{}
},
Expect: true,
},
{
Name: "Machine_UserCustomDetails_ToNil",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.UserCustomDetails = map[string]string{
"foo": "bar",
}
nw.Spec.UserCustomDetails = nil
},
Expect: true,
},
{
Name: "Machine_UserCustomDetails_Replace",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.UserCustomDetails = map[string]string{
"foo": "bar",
}
nw.Spec.UserCustomDetails = map[string]string{
"qux": "baz",
}
},
Expect: true,
},
{
Name: "Machine_UserCustomDetails_ReplaceEmptyValue",
ConfigureMachines: func(old, nw *v1alpha1.CloudStackMachineConfig) {
old.Spec.UserCustomDetails = map[string]string{
"foo": "",
"qux": "baz",
}
nw.Spec.UserCustomDetails = map[string]string{
"bar": "",
"qux": "baz",
}
},
Expect: true,
},
} {
t.Run(tc.Name, func(t *testing.T) {
oldDatacenter := givenDatacenterConfig(t, testClusterConfigMainFilename)
newDatacenter := oldDatacenter.DeepCopy()
if tc.ConfigureDatacenter != nil {
tc.ConfigureDatacenter(oldDatacenter, newDatacenter)
}
oldMachines := givenMachineConfigs(t, testClusterConfigMainFilename)
oldMachine, newMachine := oldMachines["test"], oldMachines["test"].DeepCopy()
if tc.ConfigureMachines != nil {
tc.ConfigureMachines(oldMachine, newMachine)
}
result := NeedNewMachineTemplate(
oldDatacenter,
newDatacenter,
oldMachine,
newMachine,
test.NewNullLogger(),
)
if result != tc.Expect {
t.Fatalf("Expected: %v; Received: %v", tc.Expect, result)
}
})
}
}
func TestInstallCustomProviderComponentsKubeVipEnabled(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cc := givenClusterConfig(t, testClusterConfigMainFilename)
fillClusterSpecWithClusterConfig(clusterSpec, cc)
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
provider := newProviderWithKubectl(t, dcConfig, cc, kubectl, nil)
kubeConfigFile := "test"
oldCloudstackKubeVipDisabledVal := os.Getenv(features.CloudStackKubeVipDisabledEnvVar)
os.Unsetenv(features.CloudStackKubeVipDisabledEnvVar)
defer os.Setenv(features.CloudStackKubeVipDisabledEnvVar, oldCloudstackKubeVipDisabledVal)
kubectl.EXPECT().SetEksaControllerEnvVar(ctx, features.CloudStackKubeVipDisabledEnvVar, "false", kubeConfigFile).Return(nil)
if err := provider.InstallCustomProviderComponents(ctx, kubeConfigFile); err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestNeedsNewWorkloadTemplateK8sVersion(t *testing.T) {
oldSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newK8sSpec := oldSpec.DeepCopy()
newK8sSpec.Cluster.Spec.KubernetesVersion = "1.25"
assert.True(t, NeedsNewWorkloadTemplate(oldSpec, newK8sSpec, nil, nil, nil, nil, test.NewNullLogger()))
}
func TestNeedsNewWorkloadTemplateBundleNumber(t *testing.T) {
oldSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newK8sSpec := oldSpec.DeepCopy()
newK8sSpec.Bundles.Spec.Number = 10000
assert.True(t, NeedsNewWorkloadTemplate(oldSpec, newK8sSpec, nil, nil, nil, nil, test.NewNullLogger()))
}
func TestProviderUpdateSecrets(t *testing.T) {
tests := []struct {
testName string
configPath string
expectedSecretsYamlPath string
getSecretError error
applyError error
wantErr bool
}{
{
testName: "valid single profile",
configPath: defaultCloudStackCloudConfigPath,
expectedSecretsYamlPath: "testdata/expected_secrets_single.yaml",
getSecretError: notFoundError,
applyError: nil,
wantErr: false,
},
{
testName: "valid multiple profiles",
configPath: "testdata/cloudstack_config_multiple_profiles.ini",
expectedSecretsYamlPath: "testdata/expected_secrets_multiple.yaml",
getSecretError: notFoundError,
applyError: nil,
wantErr: false,
},
{
testName: "secret already present",
configPath: defaultCloudStackCloudConfigPath,
expectedSecretsYamlPath: "testdata/expected_secrets_single.yaml",
getSecretError: nil,
applyError: nil,
wantErr: false,
},
{
testName: "secret apply fails",
configPath: defaultCloudStackCloudConfigPath,
expectedSecretsYamlPath: "testdata/expected_secrets_single.yaml",
getSecretError: notFoundError,
applyError: errors.New("exception"),
wantErr: true,
},
}
for _, test := range tests {
t.Run(test.testName, func(t *testing.T) {
tt := NewWithT(t)
mockCtrl := gomock.NewController(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)
if provider == nil {
t.Fatalf("provider object is nil")
}
saveContext(t, test.configPath)
expectedSecretsYaml, err := configFS.ReadFile(test.expectedSecretsYamlPath)
if err != nil {
t.Fatalf("Failed to read embed eksd release: %s", err)
}
kubectl.EXPECT().GetSecretFromNamespace(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil, test.getSecretError)
if test.getSecretError != nil {
kubectl.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), expectedSecretsYaml).Return(test.applyError)
}
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
err = provider.UpdateSecrets(ctx, cluster, nil)
if test.wantErr {
tt.Expect(err).NotTo(BeNil())
} else {
tt.Expect(err).To(BeNil())
}
})
}
}
func TestValidateNewSpecMachineConfigImmutable(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
workerMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
newAffinityGroupIds := [1]string{"different"}
newClusterSpec.CloudStackMachineConfigs[workerMachineConfigName].Spec.AffinityGroupIds = newAffinityGroupIds[:]
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.CloudStackDatacenter, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.CloudStackMachineConfigs[workerMachineConfigName], nil).AnyTimes()
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.ErrorContains(t, err, "field is immutable")
}
func TestValidateNewSpecMachineConfigNotFound(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
workerMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
apierr := apierrors.NewNotFound(schema.GroupResource{}, workerMachineConfigName)
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaCloudStackDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.CloudStackDatacenter, nil)
kubectl.EXPECT().GetEksaCloudStackMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(nil, apierr).AnyTimes()
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.ErrorContains(t, err, "not found")
}
package cloudstack
import (
"context"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
yamlcapi "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
// BaseControlPlane represents a CAPI CloudStack control plane.
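// It is the generic clusterapi.ControlPlane instantiated with the CAPI CloudStack cluster and machine template types.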
type BaseControlPlane = clusterapi.ControlPlane[*cloudstackv1.CloudStackCluster, *cloudstackv1.CloudStackMachineTemplate]
// ControlPlane holds the CloudStack specific objects for a CAPI CloudStack control plane.
type ControlPlane struct {
BaseControlPlane
}
// Objects returns the control plane objects associated with the CloudStack cluster.
func (p ControlPlane) Objects() []kubernetes.Object {
o := p.BaseControlPlane.Objects()
return o
}
// ControlPlaneSpec builds a CloudStack ControlPlane definition based on an eks-a cluster spec.
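// A minimal usage sketch (illustrative only; the fake client, null logger, and spec helpers
// below come from this repo's internal test package and are just one way to satisfy the parameters):
//
//    ctx := context.Background()
//    logger := test.NewNullLogger()
//    spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml")
//    cp, err := ControlPlaneSpec(ctx, logger, test.NewFakeKubeClient(), spec)
//    if err != nil {
//        // handle error
//    }
//    _ = cp.Objects() // CAPI Cluster, CloudStackCluster, KubeadmControlPlane and machine templates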
func ControlPlaneSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, clusterSpec *cluster.Spec) (*ControlPlane, error) {
templateBuilder := NewTemplateBuilder(time.Now)
controlPlaneYaml, err := templateBuilder.GenerateCAPISpecControlPlane(
clusterSpec,
func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster)
values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(clusterSpec.Cluster)
},
)
if err != nil {
return nil, errors.Wrap(err, "generating cloudstack control plane yaml spec")
}
parser, builder, err := newControlPlaneParser(logger)
if err != nil {
return nil, err
}
err = parser.Parse(controlPlaneYaml, builder)
if err != nil {
return nil, errors.Wrap(err, "parsing cloudstack control plane yaml")
}
cp := builder.ControlPlane
if err = cp.UpdateImmutableObjectNames(ctx, client, GetMachineTemplate, machineTemplateEqual); err != nil {
return nil, errors.Wrap(err, "updating cloudstack immutable object names")
}
return cp, nil
}
// controlPlaneBuilder defines the builder for all objects in the CAPI CloudStack control plane.
type controlPlaneBuilder struct {
BaseBuilder *yamlcapi.ControlPlaneBuilder[*cloudstackv1.CloudStackCluster, *cloudstackv1.CloudStackMachineTemplate]
ControlPlane *ControlPlane
}
// BuildFromParsed implements the base yamlcapi.BuildFromParsed and processes any additional objects for the CloudStack control plane.
func (b *controlPlaneBuilder) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
if err := b.BaseBuilder.BuildFromParsed(lookup); err != nil {
return err
}
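// No CloudStack-specific objects are parsed beyond the base set today, so copying the base control plane is all that is needed.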
b.ControlPlane.BaseControlPlane = *b.BaseBuilder.ControlPlane
return nil
}
func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *controlPlaneBuilder, error) {
parser, baseBuilder, err := yamlcapi.NewControlPlaneParserAndBuilder(
logger,
yamlutil.NewMapping(
"CloudStackCluster",
func() *cloudstackv1.CloudStackCluster {
return &cloudstackv1.CloudStackCluster{}
},
),
yamlutil.NewMapping(
"CloudStackMachineTemplate",
func() *cloudstackv1.CloudStackMachineTemplate {
return &cloudstackv1.CloudStackMachineTemplate{}
},
),
)
if err != nil {
return nil, nil, errors.Wrap(err, "building cloudstack control plane parser")
}
builder := &controlPlaneBuilder{
BaseBuilder: baseBuilder,
ControlPlane: &ControlPlane{},
}
return parser, builder, nil
}
package cloudstack
import (
"context"
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
const (
testClusterConfigFilename = "testdata/cluster_main.yaml"
)
func TestControlPlaneObjects(t *testing.T) {
tests := []struct {
name string
controlPlane *ControlPlane
expected []kubernetes.Object
}{
{
name: "stacked etcd",
controlPlane: &ControlPlane{
BaseControlPlane: BaseControlPlane{
Cluster: capiCluster(),
ProviderCluster: cloudstackCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: cloudstackMachineTemplate("controlplane-machinetemplate"),
},
},
expected: []kubernetes.Object{
capiCluster(),
cloudstackCluster(),
kubeadmControlPlane(),
cloudstackMachineTemplate("controlplane-machinetemplate"),
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tc.controlPlane.Objects()).To(ConsistOf(tc.expected))
})
}
}
func TestControlPlaneSpecNewCluster(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane()))
g.Expect(cp.ProviderCluster).To(Equal(cloudstackCluster()))
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
}
func TestControlPlaneSpecNoKubeVersion(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
spec.Cluster.Spec.KubernetesVersion = ""
_, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).To(MatchError(ContainSubstring("generating cloudstack control plane yaml spec")))
}
func TestControlPlaneSpecNoChangesMachineTemplates(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
originalKCP := kubeadmControlPlane()
originalCPMachineTemplate := cloudstackMachineTemplate("test-control-plane-1")
expectedKCP := originalKCP.DeepCopy()
expectedCPtemplate := originalCPMachineTemplate.DeepCopy()
client := test.NewFakeKubeClient(
originalKCP,
originalCPMachineTemplate,
)
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(expectedKCP))
g.Expect(cp.ProviderCluster).To(Equal(cloudstackCluster()))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(expectedCPtemplate))
}
func TestControlPlaneSpecUpdateMachineTemplates(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
originalKubeadmControlPlane := kubeadmControlPlane()
originalCPMachineTemplate := cloudstackMachineTemplate("test-control-plane")
expectedKCP := originalKubeadmControlPlane.DeepCopy()
expectedCPTemplate := originalCPMachineTemplate.DeepCopy()
client := test.NewFakeKubeClient(
originalKubeadmControlPlane,
originalCPMachineTemplate,
)
cpTaints := []corev1.Taint{
{
Key: "foo",
Value: "bar",
Effect: "PreferNoSchedule",
},
}
spec.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints
expectedKCP.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.Taints = cpTaints
expectedKCP.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.Taints = cpTaints
expectedCPTemplate.Name = "test-control-plane-1"
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(expectedKCP))
g.Expect(cp.ProviderCluster).To(Equal(cloudstackCluster()))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(expectedCPTemplate))
}
func TestControlPlaneSpecRegistryMirrorConfiguration(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
tests := []struct {
name string
mirrorConfig *anywherev1.RegistryMirrorConfiguration
files []bootstrapv1.File
}{
{
name: "insecure skip verify",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerify(),
},
{
name: "insecure skip verify with ca cert",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Spec.KubeadmConfigSpec.Files = append(kcp.Spec.KubeadmConfigSpec.Files, tt.files...)
precmds := []string{"swapoff -a"}
precmds = append(precmds, test.RegistryMirrorSudoPreKubeadmCommands()...)
precmds2 := []string{
"hostname \"{{ ds.meta_data.hostname }}\"",
"echo \"::1 ipv6-localhost ipv6-loopback\" >/etc/hosts",
"echo \"127.0.0.1 localhost\" >>/etc/hosts",
"echo \"127.0.0.1 {{ ds.meta_data.hostname }}\" >>/etc/hosts",
"echo \"{{ ds.meta_data.hostname }}\" >/etc/hostname",
"if [ ! -L /var/log/kubernetes ] ;\n then\n mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ;\n mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ;\n else echo \"/var/log/kubernetes already symlnk\";\nfi",
}
kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(precmds, precmds2...)
})))
g.Expect(cp.ProviderCluster).To(Equal(cloudstackCluster()))
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
})
}
}
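// capiCluster returns the CAPI Cluster object expected for the test cluster spec.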
func capiCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test"},
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
APIServerPort: nil,
Services: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"10.96.0.0/12"},
},
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
},
ControlPlaneEndpoint: clusterv1.APIEndpoint{
Host: "1.2.3.4",
Port: 6443,
},
ControlPlaneRef: &corev1.ObjectReference{
Kind: "KubeadmControlPlane",
Name: "test",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
ManagedExternalEtcdRef: &corev1.ObjectReference{
Kind: "EtcdadmCluster",
Name: "test-etcd",
APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1",
},
InfrastructureRef: &corev1.ObjectReference{
Kind: "CloudStackCluster",
Name: "test",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
},
},
}
}
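// cloudstackCluster returns the CloudStackCluster object expected for the test cluster spec.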
func cloudstackCluster() *cloudstackv1.CloudStackCluster {
return &cloudstackv1.CloudStackCluster{
TypeMeta: metav1.TypeMeta{
Kind: "CloudStackCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: constants.EksaSystemNamespace,
},
Spec: cloudstackv1.CloudStackClusterSpec{
FailureDomains: []cloudstackv1.CloudStackFailureDomainSpec{
{
Name: "default-az-0",
Zone: cloudstackv1.CloudStackZoneSpec{
Name: "zone1",
ID: "",
Network: cloudstackv1.Network{ID: "", Type: "", Name: "net1"},
},
Account: "admin",
Domain: "domain1",
ACSEndpoint: corev1.SecretReference{
Name: "global",
Namespace: "eksa-system",
},
},
},
ControlPlaneEndpoint: clusterv1.APIEndpoint{
Host: "1.2.3.4",
Port: 6443,
},
},
}
}
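// kubeadmControlPlane returns the KubeadmControlPlane expected for the test cluster spec, applying any opts mutations before returning.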
func kubeadmControlPlane(opts ...func(*controlplanev1.KubeadmControlPlane)) *controlplanev1.KubeadmControlPlane {
kcp := &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmControlPlane",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: constants.EksaSystemNamespace,
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: corev1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
Kind: "CloudStackMachineTemplate",
Name: "test-control-plane-1",
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
Etcd: bootstrapv1.Etcd{
External: &bootstrapv1.ExternalEtcd{
Endpoints: []string{},
CAFile: "/etc/kubernetes/pki/etcd/ca.crt",
CertFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt",
KeyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key",
},
},
DNS: bootstrapv1.DNS{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/coredns",
ImageTag: "v1.8.3-eks-1-21-4",
},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"profiling": "false",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"audit-log-maxsize": "512",
"audit-log-path": "/var/log/kubernetes/api-audit.log",
"audit-policy-file": "/etc/kubernetes/audit-policy.yaml",
"cloud-provider": "external",
},
ExtraVolumes: []bootstrapv1.HostPathMount{
{
HostPath: "/etc/kubernetes/audit-policy.yaml",
MountPath: "/etc/kubernetes/audit-policy.yaml",
Name: "audit-policy",
PathType: "File",
ReadOnly: true,
},
{
HostPath: "/var/log/kubernetes",
MountPath: "/var/log/kubernetes",
Name: "audit-log-dir",
PathType: "DirectoryOrCreate",
ReadOnly: false,
},
},
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{
"cloud-provider": "external",
"profiling": "false",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{
"profiling": "false",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
Files: []bootstrapv1.File{
{
Path: "/etc/kubernetes/manifests/kube-vip.yaml",
Owner: "root:root",
Permissions: "",
Encoding: "",
Append: false,
Content: "apiVersion: v1\nkind: Pod\nmetadata:\n creationTimestamp: null\n name: kube-vip\n namespace: kube-system\nspec:\n containers:\n - args:\n - manager\n env:\n - name: vip_arp\n value: \"true\"\n - name: port\n value: \"6443\"\n - name: vip_cidr\n value: \"32\"\n - name: cp_enable\n value: \"true\"\n - name: cp_namespace\n value: kube-system\n - name: vip_ddns\n value: \"false\"\n - name: vip_leaderelection\n value: \"true\"\n - name: vip_leaseduration\n value: \"15\"\n - name: vip_renewdeadline\n value: \"10\"\n - name: vip_retryperiod\n value: \"2\"\n - name: address\n value: 1.2.3.4\n image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158\n imagePullPolicy: IfNotPresent\n name: kube-vip\n resources: {}\n securityContext:\n capabilities:\n add:\n - NET_ADMIN\n - NET_RAW\n volumeMounts:\n - mountPath: /etc/kubernetes/admin.conf\n name: kubeconfig\n hostNetwork: true\n volumes:\n - hostPath:\n path: /etc/kubernetes/admin.conf\n name: kubeconfig\nstatus: {}\n",
},
{
Path: "/etc/kubernetes/audit-policy.yaml",
Owner: "root:root",
Content: `apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
# Log aws-auth configmap changes
- level: RequestResponse
namespaces: ["kube-system"]
verbs: ["update", "patch", "delete"]
resources:
- group: "" # core
resources: ["configmaps"]
resourceNames: ["aws-auth"]
omitStages:
- "RequestReceived"
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
- level: Request
resources:
- group: ""
resources: ["serviceaccounts/token"]
# Get repsonses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
`,
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
Name: "{{ ds.meta_data.hostname }}",
CRISocket: "/var/run/containerd/containerd.sock",
KubeletExtraArgs: map[string]string{
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"anonymous-auth": "false",
"provider-id": "cloudstack:///'{{ ds.meta_data.instance_id }}'",
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
Name: "{{ ds.meta_data.hostname }}",
CRISocket: "/var/run/containerd/containerd.sock",
KubeletExtraArgs: map[string]string{
"provider-id": "cloudstack:///'{{ ds.meta_data.instance_id }}'",
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"anonymous-auth": "false",
},
},
},
DiskSetup: &bootstrapv1.DiskSetup{
Partitions: []bootstrapv1.Partition{
{Device: "/dev/vdb", Layout: true, Overwrite: ptr.Bool(false), TableType: ptr.String("gpt")},
},
Filesystems: []bootstrapv1.Filesystem{
{
Device: "/dev/vdb1",
Filesystem: "ext4",
Label: "data_disk",
Partition: nil,
Overwrite: ptr.Bool(false),
ReplaceFS: nil,
ExtraOpts: []string{
"-E",
"lazy_itable_init=1,lazy_journal_init=1",
},
},
},
},
Mounts: []bootstrapv1.MountPoints{
[]string{"LABEL=data_disk", "/data-small"},
},
PreKubeadmCommands: []string{
"swapoff -a",
"hostname \"{{ ds.meta_data.hostname }}\"",
"echo \"::1 ipv6-localhost ipv6-loopback\" >/etc/hosts",
"echo \"127.0.0.1 localhost\" >>/etc/hosts",
"echo \"127.0.0.1 {{ ds.meta_data.hostname }}\" >>/etc/hosts",
"echo \"{{ ds.meta_data.hostname }}\" >/etc/hostname",
"if [ ! -L /var/log/kubernetes ] ;\n then\n mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ;\n mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ;\n else echo \"/var/log/kubernetes already symlnk\";\nfi",
},
Users: []bootstrapv1.User{
{
Name: "mySshUsername",
Sudo: ptr.String("ALL=(ALL) NOPASSWD:ALL"),
SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
Format: "cloud-config",
UseExperimentalRetryJoin: true,
},
Replicas: ptr.Int32(3),
Version: "v1.21.2-eks-1-21-4",
},
}
for _, opt := range opts {
opt(kcp)
}
return kcp
}
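// cloudstackMachineTemplate returns the CloudStackMachineTemplate expected for the test cluster spec with the given name.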
func cloudstackMachineTemplate(name string) *cloudstackv1.CloudStackMachineTemplate {
return &cloudstackv1.CloudStackMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "CloudStackMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.EksaSystemNamespace,
Annotations: map[string]string{
"mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1": "/data-small",
"symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1": "/var/log/kubernetes:/data-small/var/log/kubernetes",
"device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1": "/dev/vdb",
"filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1": "ext4",
"label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1": "data_disk",
},
},
Spec: cloudstackv1.CloudStackMachineTemplateSpec{
Spec: cloudstackv1.CloudStackMachineTemplateResource{
Spec: cloudstackv1.CloudStackMachineSpec{
Template: cloudstackv1.CloudStackResourceIdentifier{
Name: "centos7-k8s-118",
},
Offering: cloudstackv1.CloudStackResourceIdentifier{
Name: "m4-large",
},
DiskOffering: cloudstackv1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: cloudstackv1.CloudStackResourceIdentifier{ID: "", Name: "Small"},
MountPath: "/data-small",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
AffinityGroupIDs: []string{
"control-plane-anti-affinity",
},
},
},
},
}
}
| 642 |
eks-anywhere | aws | Go | package cloudstack
import (
"context"
apiv1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/collection"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
// GetCloudstackExecConfig builds the CloudStack exec config from the credential secrets referenced by the datacenter config's availability zones.
func GetCloudstackExecConfig(ctx context.Context, cli client.Client, datacenterConfig *v1alpha1.CloudStackDatacenterConfig) (*decoder.CloudStackExecConfig, error) {
var profiles []decoder.CloudStackProfileConfig
credRefs := collection.NewSet[string]()
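// Collect the unique credential references used across all availability zones.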
for _, zone := range datacenterConfig.Spec.AvailabilityZones {
credRefs.Add(zone.CredentialsRef)
}
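// Fetch each referenced secret from the eksa-system namespace and translate it into a CloudStack profile.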
for _, profileName := range credRefs.ToSlice() {
secret := &apiv1.Secret{}
secretKey := client.ObjectKey{
Namespace: constants.EksaSystemNamespace,
Name: profileName,
}
if err := cli.Get(ctx, secretKey, secret); err != nil {
return nil, err
}
profiles = append(profiles, decoder.CloudStackProfileConfig{
Name: profileName,
ApiKey: string(secret.Data[decoder.APIKeyKey]),
SecretKey: string(secret.Data[decoder.SecretKeyKey]),
ManagementUrl: string(secret.Data[decoder.APIUrlKey]),
VerifySsl: string(secret.Data[decoder.VerifySslKey]),
})
}
return &decoder.CloudStackExecConfig{
Profiles: profiles,
}, nil
}
| 44 |
eks-anywhere | aws | Go | package cloudstack
import (
"context"
"testing"
. "github.com/onsi/gomega"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
var (
name = "test-cluster"
namespace = "eksa-system"
)
func TestGetCloudstackExecConfigMultipleProfile(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
dcConfig.Spec.AvailabilityZones = append(dcConfig.Spec.AvailabilityZones, anywherev1.CloudStackAvailabilityZone{
Name: "testAz-2",
CredentialsRef: "testCred2",
Zone: anywherev1.CloudStackZone{
Name: "zone1",
Network: anywherev1.CloudStackResourceIdentifier{
Name: "SharedNet1",
},
},
Domain: "testDomain",
Account: "testAccount",
ManagementApiEndpoint: "testApiEndpoint",
})
secret1 := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "testCred",
Namespace: constants.EksaSystemNamespace,
},
Data: map[string][]byte{
decoder.APIKeyKey: []byte("test-key1"),
decoder.APIUrlKey: []byte("http://1.1.1.1:8080/client/api"),
decoder.SecretKeyKey: []byte("test-secret1"),
},
}
secret2 := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "testCred2",
Namespace: constants.EksaSystemNamespace,
},
Data: map[string][]byte{
decoder.APIKeyKey: []byte("test-key2"),
decoder.APIUrlKey: []byte("http://1.1.1.1:8081/client/api"),
decoder.SecretKeyKey: []byte("test-secret2"),
},
}
objs := []runtime.Object{dcConfig, secret1, secret2}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
expectedProfile1 := decoder.CloudStackProfileConfig{
Name: "testCred",
ApiKey: "test-key1",
SecretKey: "test-secret1",
ManagementUrl: "http://1.1.1.1:8080/client/api",
}
expectedProfile2 := decoder.CloudStackProfileConfig{
Name: "testCred2",
ApiKey: "test-key2",
SecretKey: "test-secret2",
ManagementUrl: "http://1.1.1.1:8081/client/api",
}
expectedExecConfig := &decoder.CloudStackExecConfig{
Profiles: []decoder.CloudStackProfileConfig{expectedProfile1, expectedProfile2},
}
gotExecConfig, err := GetCloudstackExecConfig(ctx, client, dcConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(gotExecConfig.Profiles)).To(Equal(len(expectedExecConfig.Profiles)))
g.Expect(gotExecConfig.Profiles).To(ContainElements(expectedProfile1))
g.Expect(gotExecConfig.Profiles).To(ContainElements(expectedProfile2))
}
func TestGetCloudstackExecConfigFail(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
objs := []runtime.Object{dcConfig}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
gotExecConfig, err := GetCloudstackExecConfig(ctx, client, dcConfig)
g.Expect(err).To(MatchError(ContainSubstring("secrets \"testCred\" not found")))
g.Expect(gotExecConfig).To(BeNil())
}
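// createCloudstackDatacenterConfig returns a CloudStackDatacenterConfig with one availability zone that references the testCred secret.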
func createCloudstackDatacenterConfig() *anywherev1.CloudStackDatacenterConfig {
return &anywherev1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.CloudStackDatacenterKind,
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: anywherev1.CloudStackDatacenterConfigSpec{
AvailabilityZones: []anywherev1.CloudStackAvailabilityZone{
{
Name: "testAz",
CredentialsRef: "testCred",
Zone: anywherev1.CloudStackZone{
Name: "zone1",
Network: anywherev1.CloudStackResourceIdentifier{
Name: "SharedNet1",
},
},
Domain: "testDomain",
Account: "testAccount",
ManagementApiEndpoint: "testApiEndpoint",
},
},
},
}
}
| 131 |