repo_name (string, 1–52 chars) | repo_creator (6 classes) | programming_language (4 classes) | code (string, 0–9.68M chars) | num_lines (int64, 1–234k)
---|---|---|---|---|
eks-anywhere | aws | Go | package registry
import (
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
"oras.land/oras-go/v2/registry/remote/auth"
)
// CredentialStore provides registry credentials from files such as ~/.docker/config.json.
type CredentialStore struct {
directory string
configFile *configfile.ConfigFile
}
// NewCredentialStore creates a credential store.
func NewCredentialStore() *CredentialStore {
return &CredentialStore{
directory: config.Dir(),
}
}
// SetDirectory overrides the default config directory.
func (cs *CredentialStore) SetDirectory(directory string) {
cs.directory = directory
}
// Init initializes the credential store from the configured directory.
func (cs *CredentialStore) Init() (err error) {
cs.configFile, err = config.Load(cs.directory)
if err != nil {
return err
}
if !cs.configFile.ContainsAuth() {
cs.configFile.CredentialsStore = credentials.DetectDefaultStore(cs.configFile.CredentialsStore)
}
return nil
}
// Credential returns an authentication credential for the given registry.
func (cs *CredentialStore) Credential(registry string) (auth.Credential, error) {
authConf, err := cs.configFile.GetCredentialsStore(registry).Get(registry)
if err != nil {
return auth.EmptyCredential, err
}
cred := auth.Credential{
Username: authConf.Username,
Password: authConf.Password,
AccessToken: authConf.RegistryToken,
RefreshToken: authConf.IdentityToken,
}
return cred, nil
}
| 54 |
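A minimal usage sketch of the CredentialStore API above, assuming only what the code shows; the registry host "localhost:5000" is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/eks-anywhere/pkg/registry"
)

func main() {
	store := registry.NewCredentialStore() // defaults to the Docker config dir
	if err := store.Init(); err != nil {
		log.Fatal(err)
	}
	cred, err := store.Credential("localhost:5000") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("username:", cred.Username)
}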
eks-anywhere | aws | Go | package registry_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
)
func TestCredentialStore_Init(t *testing.T) {
credentialStore := registry.NewCredentialStore()
credentialStore.SetDirectory("testdata")
err := credentialStore.Init()
assert.NoError(t, err)
result, err := credentialStore.Credential("localhost")
assert.NoError(t, err)
assert.Equal(t, "user", result.Username)
assert.Equal(t, "pass", result.Password)
assert.Equal(t, "", result.AccessToken)
assert.Equal(t, "", result.RefreshToken)
result, err = credentialStore.Credential("harbor.eksa.demo:30003")
assert.NoError(t, err)
assert.Equal(t, "captain", result.Username)
assert.Equal(t, "haddock", result.Password)
assert.Equal(t, "", result.AccessToken)
assert.Equal(t, "", result.RefreshToken)
result, err = credentialStore.Credential("bogus")
assert.NoError(t, err)
assert.Equal(t, "", result.Username)
assert.Equal(t, "", result.Password)
assert.Equal(t, "", result.AccessToken)
assert.Equal(t, "", result.RefreshToken)
result, err = credentialStore.Credential("5551212.dkr.ecr.us-west-2.amazonaws.com")
// This is a generic error, so errors.Is won't work; this prefix is as much
// of the message as we can reliably match against in a cross-platform
// fashion. If the upstream message ever changes, this assertion will break.
assert.ErrorContains(t, err, "error getting credentials - err")
assert.Equal(t, "", result.Username)
assert.Equal(t, "", result.Password)
assert.Equal(t, "", result.AccessToken)
assert.Equal(t, "", result.RefreshToken)
}
func TestCredentialStore_InitEmpty(t *testing.T) {
credentialStore := registry.NewCredentialStore()
credentialStore.SetDirectory("testdata/empty")
err := credentialStore.Init()
assert.NoError(t, err)
}
| 56 |
eks-anywhere | aws | Go | package registry
import (
"context"
"encoding/json"
"fmt"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// PullBytes pulls a resource from the registry.
func PullBytes(ctx context.Context, sc StorageClient, artifact Artifact) (data []byte, err error) {
srcStorage, err := sc.GetStorage(ctx, artifact)
if err != nil {
return nil, fmt.Errorf("repository source: %v", err)
}
_, data, err = sc.FetchBytes(ctx, srcStorage, artifact)
if err != nil {
return nil, fmt.Errorf("fetch manifest: %v", err)
}
var mani ocispec.Manifest
if err := json.Unmarshal(data, &mani); err != nil {
return nil, fmt.Errorf("unmarshal manifest: %v", err)
}
if len(mani.Layers) < 1 {
return nil, fmt.Errorf("missing layer")
}
data, err = sc.FetchBlob(ctx, srcStorage, mani.Layers[0])
if err != nil {
return nil, fmt.Errorf("fetch blob: %v", err)
}
return data, nil
}
| 37 |
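PullBytes depends on the fetched manifest bytes decoding into an OCI image manifest whose first layer descriptor is handed to FetchBlob. A small sketch of that decoding step, using a hypothetical single-layer manifest (digest and size are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Hypothetical single-layer image manifest.
	raw := []byte(`{
		"schemaVersion": 2,
		"layers": [{
			"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
			"digest": "sha256:8bc5f46db8c98aedfba4ade0d7ebbdecd8e4130e172d3d62871fc3258c40a910",
			"size": 1024
		}]
	}`)
	var m ocispec.Manifest
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	// The first layer's descriptor is what PullBytes hands to FetchBlob.
	fmt.Println(m.Layers[0].Digest)
}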
eks-anywhere | aws | Go | package registry_test
import (
_ "embed"
"fmt"
"testing"
"github.com/golang/mock/gomock"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
"github.com/aws/eks-anywhere/pkg/registry/mocks"
)
//go:embed testdata/image-manifest.json
var imageManifest []byte
//go:embed testdata/bad-image-manifest.json
var badImageManifest []byte
//go:embed testdata/no-layer-image-manifest.json
var noLayersImageManifest []byte
//go:embed testdata/package-bundle.yaml
var packageBundle []byte
func TestPull(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
desc := ocispec.Descriptor{
Digest: "sha256:8bc5f46db8c98aedfba4ade0d7ebbdecd8e4130e172d3d62871fc3258c40a910",
}
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
srcClient.EXPECT().FetchBytes(ctx, &mockSrcRepo, srcArtifact).Return(desc, imageManifest, nil)
srcClient.EXPECT().FetchBlob(ctx, &mockSrcRepo, gomock.Any()).Return(packageBundle, nil)
result, err := registry.PullBytes(ctx, srcClient, srcArtifact)
assert.NotEmpty(t, result)
assert.NoError(t, err)
}
func TestPullFetchBlobFail(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
desc := ocispec.Descriptor{
Digest: "sha256:8bc5f46db8c98aedfba4ade0d7ebbdecd8e4130e172d3d62871fc3258c40a910",
}
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
srcClient.EXPECT().FetchBytes(ctx, &mockSrcRepo, srcArtifact).Return(desc, imageManifest, nil)
srcClient.EXPECT().FetchBlob(ctx, &mockSrcRepo, gomock.Any()).Return(packageBundle, fmt.Errorf("oops"))
result, err := registry.PullBytes(ctx, srcClient, srcArtifact)
assert.Nil(t, result)
assert.EqualError(t, err, "fetch blob: oops")
}
func TestPullUnmarshalFail(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
desc := ocispec.Descriptor{
Digest: "sha256:8bc5f46db8c98aedfba4ade0d7ebbdecd8e4130e172d3d62871fc3258c40a910",
}
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
srcClient.EXPECT().FetchBytes(ctx, &mockSrcRepo, srcArtifact).Return(desc, badImageManifest, nil)
result, err := registry.PullBytes(ctx, srcClient, srcArtifact)
assert.Nil(t, result)
assert.EqualError(t, err, "unmarshal manifest: unexpected end of JSON input")
}
func TestPullNoLayerFail(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
desc := ocispec.Descriptor{
Digest: "sha256:8bc5f46db8c98aedfba4ade0d7ebbdecd8e4130e172d3d62871fc3258c40a910",
}
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
srcClient.EXPECT().FetchBytes(ctx, &mockSrcRepo, srcArtifact).Return(desc, noLayersImageManifest, nil)
result, err := registry.PullBytes(ctx, srcClient, srcArtifact)
assert.Nil(t, result)
assert.EqualError(t, err, "missing layer")
}
func TestPullFetchBytesFail(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
desc := ocispec.Descriptor{
Digest: "sha256:8bc5f46db8c98aedfba4ade0d7ebbdecd8e4130e172d3d62871fc3258c40a910",
}
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
srcClient.EXPECT().FetchBytes(ctx, &mockSrcRepo, srcArtifact).Return(desc, imageManifest, fmt.Errorf("oops"))
result, err := registry.PullBytes(ctx, srcClient, srcArtifact)
assert.Nil(t, result)
assert.EqualError(t, err, "fetch manifest: oops")
}
func TestPullGetStorageFail(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, fmt.Errorf("oops"))
result, err := registry.PullBytes(ctx, srcClient, srcArtifact)
assert.Nil(t, result)
assert.EqualError(t, err, "repository source: oops")
}
| 115 |
eks-anywhere | aws | Go | package registry
import (
"context"
"crypto/x509"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
orasregistry "oras.land/oras-go/v2/registry"
)
// StorageContext describes aspects of a registry.
type StorageContext struct {
host string
project string
credentialStore *CredentialStore
certificates *x509.CertPool
insecure bool
}
// NewStorageContext creates a registry storage context.
func NewStorageContext(host string, credentialStore *CredentialStore, certificates *x509.CertPool, insecure bool) StorageContext {
return StorageContext{
host: host,
credentialStore: credentialStore,
certificates: certificates,
insecure: insecure,
}
}
// StorageClient is a general-purpose interface for an image storage client.
type StorageClient interface {
Init() error
Resolve(ctx context.Context, srcStorage orasregistry.Repository, versionedImage string) (desc ocispec.Descriptor, err error)
GetStorage(ctx context.Context, image Artifact) (repo orasregistry.Repository, err error)
SetProject(project string)
Destination(image Artifact) string
FetchBytes(ctx context.Context, srcStorage orasregistry.Repository, artifact Artifact) (ocispec.Descriptor, []byte, error)
FetchBlob(ctx context.Context, srcStorage orasregistry.Repository, descriptor ocispec.Descriptor) ([]byte, error)
CopyGraph(ctx context.Context, srcStorage orasregistry.Repository, srcRef string, dstStorage orasregistry.Repository, dstRef string) (ocispec.Descriptor, error)
Tag(ctx context.Context, dstStorage orasregistry.Repository, desc ocispec.Descriptor, tag string) error
}
| 42 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: oras.land/oras-go/v2/registry (interfaces: Repository)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
io "io"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
registry "oras.land/oras-go/v2/registry"
)
// MockRepository is a mock of Repository interface.
type MockRepository struct {
ctrl *gomock.Controller
recorder *MockRepositoryMockRecorder
}
// MockRepositoryMockRecorder is the mock recorder for MockRepository.
type MockRepositoryMockRecorder struct {
mock *MockRepository
}
// NewMockRepository creates a new mock instance.
func NewMockRepository(ctrl *gomock.Controller) *MockRepository {
mock := &MockRepository{ctrl: ctrl}
mock.recorder = &MockRepositoryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRepository) EXPECT() *MockRepositoryMockRecorder {
return m.recorder
}
// Blobs mocks base method.
func (m *MockRepository) Blobs() registry.BlobStore {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Blobs")
ret0, _ := ret[0].(registry.BlobStore)
return ret0
}
// Blobs indicates an expected call of Blobs.
func (mr *MockRepositoryMockRecorder) Blobs() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Blobs", reflect.TypeOf((*MockRepository)(nil).Blobs))
}
// Delete mocks base method.
func (m *MockRepository) Delete(arg0 context.Context, arg1 v1.Descriptor) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockRepositoryMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockRepository)(nil).Delete), arg0, arg1)
}
// Exists mocks base method.
func (m *MockRepository) Exists(arg0 context.Context, arg1 v1.Descriptor) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Exists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Exists indicates an expected call of Exists.
func (mr *MockRepositoryMockRecorder) Exists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockRepository)(nil).Exists), arg0, arg1)
}
// Fetch mocks base method.
func (m *MockRepository) Fetch(arg0 context.Context, arg1 v1.Descriptor) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Fetch", arg0, arg1)
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Fetch indicates an expected call of Fetch.
func (mr *MockRepositoryMockRecorder) Fetch(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", reflect.TypeOf((*MockRepository)(nil).Fetch), arg0, arg1)
}
// FetchReference mocks base method.
func (m *MockRepository) FetchReference(arg0 context.Context, arg1 string) (v1.Descriptor, io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchReference", arg0, arg1)
ret0, _ := ret[0].(v1.Descriptor)
ret1, _ := ret[1].(io.ReadCloser)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FetchReference indicates an expected call of FetchReference.
func (mr *MockRepositoryMockRecorder) FetchReference(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchReference", reflect.TypeOf((*MockRepository)(nil).FetchReference), arg0, arg1)
}
// Manifests mocks base method.
func (m *MockRepository) Manifests() registry.ManifestStore {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Manifests")
ret0, _ := ret[0].(registry.ManifestStore)
return ret0
}
// Manifests indicates an expected call of Manifests.
func (mr *MockRepositoryMockRecorder) Manifests() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Manifests", reflect.TypeOf((*MockRepository)(nil).Manifests))
}
// Push mocks base method.
func (m *MockRepository) Push(arg0 context.Context, arg1 v1.Descriptor, arg2 io.Reader) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Push", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Push indicates an expected call of Push.
func (mr *MockRepositoryMockRecorder) Push(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockRepository)(nil).Push), arg0, arg1, arg2)
}
// PushReference mocks base method.
func (m *MockRepository) PushReference(arg0 context.Context, arg1 v1.Descriptor, arg2 io.Reader, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushReference", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// PushReference indicates an expected call of PushReference.
func (mr *MockRepositoryMockRecorder) PushReference(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushReference", reflect.TypeOf((*MockRepository)(nil).PushReference), arg0, arg1, arg2, arg3)
}
// Referrers mocks base method.
func (m *MockRepository) Referrers(arg0 context.Context, arg1 v1.Descriptor, arg2 string, arg3 func([]v1.Descriptor) error) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Referrers", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Referrers indicates an expected call of Referrers.
func (mr *MockRepositoryMockRecorder) Referrers(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Referrers", reflect.TypeOf((*MockRepository)(nil).Referrers), arg0, arg1, arg2, arg3)
}
// Resolve mocks base method.
func (m *MockRepository) Resolve(arg0 context.Context, arg1 string) (v1.Descriptor, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Resolve", arg0, arg1)
ret0, _ := ret[0].(v1.Descriptor)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Resolve indicates an expected call of Resolve.
func (mr *MockRepositoryMockRecorder) Resolve(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Resolve", reflect.TypeOf((*MockRepository)(nil).Resolve), arg0, arg1)
}
// Tag mocks base method.
func (m *MockRepository) Tag(arg0 context.Context, arg1 v1.Descriptor, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tag", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Tag indicates an expected call of Tag.
func (mr *MockRepositoryMockRecorder) Tag(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tag", reflect.TypeOf((*MockRepository)(nil).Tag), arg0, arg1, arg2)
}
// Tags mocks base method.
func (m *MockRepository) Tags(arg0 context.Context, arg1 string, arg2 func([]string) error) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tags", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Tags indicates an expected call of Tags.
func (mr *MockRepositoryMockRecorder) Tags(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tags", reflect.TypeOf((*MockRepository)(nil).Tags), arg0, arg1, arg2)
}
| 212 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/registry/storage.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
registry "github.com/aws/eks-anywhere/pkg/registry"
gomock "github.com/golang/mock/gomock"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
registry0 "oras.land/oras-go/v2/registry"
)
// MockStorageClient is a mock of StorageClient interface.
type MockStorageClient struct {
ctrl *gomock.Controller
recorder *MockStorageClientMockRecorder
}
// MockStorageClientMockRecorder is the mock recorder for MockStorageClient.
type MockStorageClientMockRecorder struct {
mock *MockStorageClient
}
// NewMockStorageClient creates a new mock instance.
func NewMockStorageClient(ctrl *gomock.Controller) *MockStorageClient {
mock := &MockStorageClient{ctrl: ctrl}
mock.recorder = &MockStorageClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStorageClient) EXPECT() *MockStorageClientMockRecorder {
return m.recorder
}
// CopyGraph mocks base method.
func (m *MockStorageClient) CopyGraph(ctx context.Context, srcStorage registry0.Repository, srcRef string, dstStorage registry0.Repository, dstRef string) (v1.Descriptor, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CopyGraph", ctx, srcStorage, srcRef, dstStorage, dstRef)
ret0, _ := ret[0].(v1.Descriptor)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CopyGraph indicates an expected call of CopyGraph.
func (mr *MockStorageClientMockRecorder) CopyGraph(ctx, srcStorage, srcRef, dstStorage, dstRef interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyGraph", reflect.TypeOf((*MockStorageClient)(nil).CopyGraph), ctx, srcStorage, srcRef, dstStorage, dstRef)
}
// Destination mocks base method.
func (m *MockStorageClient) Destination(image registry.Artifact) string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Destination", image)
ret0, _ := ret[0].(string)
return ret0
}
// Destination indicates an expected call of Destination.
func (mr *MockStorageClientMockRecorder) Destination(image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Destination", reflect.TypeOf((*MockStorageClient)(nil).Destination), image)
}
// FetchBlob mocks base method.
func (m *MockStorageClient) FetchBlob(ctx context.Context, srcStorage registry0.Repository, descriptor v1.Descriptor) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchBlob", ctx, srcStorage, descriptor)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FetchBlob indicates an expected call of FetchBlob.
func (mr *MockStorageClientMockRecorder) FetchBlob(ctx, srcStorage, descriptor interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlob", reflect.TypeOf((*MockStorageClient)(nil).FetchBlob), ctx, srcStorage, descriptor)
}
// FetchBytes mocks base method.
func (m *MockStorageClient) FetchBytes(ctx context.Context, srcStorage registry0.Repository, artifact registry.Artifact) (v1.Descriptor, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchBytes", ctx, srcStorage, artifact)
ret0, _ := ret[0].(v1.Descriptor)
ret1, _ := ret[1].([]byte)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FetchBytes indicates an expected call of FetchBytes.
func (mr *MockStorageClientMockRecorder) FetchBytes(ctx, srcStorage, artifact interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBytes", reflect.TypeOf((*MockStorageClient)(nil).FetchBytes), ctx, srcStorage, artifact)
}
// GetStorage mocks base method.
func (m *MockStorageClient) GetStorage(ctx context.Context, image registry.Artifact) (registry0.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetStorage", ctx, image)
ret0, _ := ret[0].(registry0.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetStorage indicates an expected call of GetStorage.
func (mr *MockStorageClientMockRecorder) GetStorage(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStorage", reflect.TypeOf((*MockStorageClient)(nil).GetStorage), ctx, image)
}
// Init mocks base method.
func (m *MockStorageClient) Init() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init")
ret0, _ := ret[0].(error)
return ret0
}
// Init indicates an expected call of Init.
func (mr *MockStorageClientMockRecorder) Init() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockStorageClient)(nil).Init))
}
// Resolve mocks base method.
func (m *MockStorageClient) Resolve(ctx context.Context, srcStorage registry0.Repository, versionedImage string) (v1.Descriptor, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Resolve", ctx, srcStorage, versionedImage)
ret0, _ := ret[0].(v1.Descriptor)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Resolve indicates an expected call of Resolve.
func (mr *MockStorageClientMockRecorder) Resolve(ctx, srcStorage, versionedImage interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Resolve", reflect.TypeOf((*MockStorageClient)(nil).Resolve), ctx, srcStorage, versionedImage)
}
// SetProject mocks base method.
func (m *MockStorageClient) SetProject(project string) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetProject", project)
}
// SetProject indicates an expected call of SetProject.
func (mr *MockStorageClientMockRecorder) SetProject(project interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetProject", reflect.TypeOf((*MockStorageClient)(nil).SetProject), project)
}
// Tag mocks base method.
func (m *MockStorageClient) Tag(ctx context.Context, dstStorage registry0.Repository, desc v1.Descriptor, tag string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tag", ctx, dstStorage, desc, tag)
ret0, _ := ret[0].(error)
return ret0
}
// Tag indicates an expected call of Tag.
func (mr *MockStorageClientMockRecorder) Tag(ctx, dstStorage, desc, tag interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tag", reflect.TypeOf((*MockStorageClient)(nil).Tag), ctx, dstStorage, desc, tag)
}
| 169 |
eks-anywhere | aws | Go | package registrymirror
import (
"net"
urllib "net/url"
"path/filepath"
"regexp"
"strings"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
)
// RegistryMirror configures mirror mappings for artifact registries.
type RegistryMirror struct {
// BaseRegistry is the address of the registry mirror without namespace. Just the host and the port.
BaseRegistry string
// NamespacedRegistryMap stores mirror mappings for artifact registries
NamespacedRegistryMap map[string]string
// Auth should be marked as true if authentication is required for the registry mirror
Auth bool
// CACertContent defines the contents of the registry mirror CA certificate.
CACertContent string
// InsecureSkipVerify skips the registry certificate verification.
// Only use this solution for isolated testing or in a tightly controlled, air-gapped environment.
InsecureSkipVerify bool
}
var re = regexp.MustCompile(constants.DefaultCuratedPackagesRegistryRegex)
// FromCluster is a constructor for RegistryMirror from a cluster schema.
func FromCluster(cluster *v1alpha1.Cluster) *RegistryMirror {
return FromClusterRegistryMirrorConfiguration(cluster.Spec.RegistryMirrorConfiguration)
}
// FromClusterRegistryMirrorConfiguration is a constructor for RegistryMirror from a RegistryMirrorConfiguration schema.
func FromClusterRegistryMirrorConfiguration(config *v1alpha1.RegistryMirrorConfiguration) *RegistryMirror {
if config == nil {
return nil
}
registryMap := make(map[string]string)
base := net.JoinHostPort(config.Endpoint, config.Port)
// add registry mirror base address
// for each namespace, add corresponding endpoint
for _, ociNamespace := range config.OCINamespaces {
mirror := filepath.Join(base, ociNamespace.Namespace)
if re.MatchString(ociNamespace.Registry) {
// handle curated packages in all regions
// static key makes it easier for mirror lookup
registryMap[constants.DefaultCuratedPackagesRegistryRegex] = mirror
} else {
registryMap[ociNamespace.Registry] = mirror
}
}
if len(registryMap) == 0 {
// for backward compatibility, default mapping for public.ecr.aws is added
// when no namespace mapping is specified
registryMap[constants.DefaultCoreEKSARegistry] = base
}
return &RegistryMirror{
BaseRegistry: base,
NamespacedRegistryMap: registryMap,
Auth: config.Authenticate,
CACertContent: config.CACertContent,
InsecureSkipVerify: config.InsecureSkipVerify,
}
}
// CoreEKSAMirror returns the configured mirror for public.ecr.aws.
func (r *RegistryMirror) CoreEKSAMirror() string {
return r.NamespacedRegistryMap[constants.DefaultCoreEKSARegistry]
}
// CuratedPackagesMirror returns the mirror for curated packages.
func (r *RegistryMirror) CuratedPackagesMirror() string {
return r.NamespacedRegistryMap[constants.DefaultCuratedPackagesRegistryRegex]
}
// ReplaceRegistry replaces the host in a URL with the corresponding registry mirror.
// It supports both full URLs and container image URLs.
// If the provided URL is malformed, there is no guarantee that the returned
// value will be valid. If there is no corresponding registry mirror, the
// original URL is returned unchanged.
func (r *RegistryMirror) ReplaceRegistry(url string) string {
if r == nil {
return url
}
u, err := urllib.Parse(url)
if err != nil {
return url
}
if u.Scheme == "" {
// a bare image reference parses with an empty scheme; re-parse with a
// synthetic scheme so the host field is populated
if u, err = urllib.Parse("oci://" + url); err != nil {
return url
}
u.Scheme = ""
}
key := u.Host
if re.MatchString(key) {
key = constants.DefaultCuratedPackagesRegistryRegex
}
if v, ok := r.NamespacedRegistryMap[key]; ok {
return strings.Replace(url, u.Host, v, 1)
}
return url
}
| 103 |
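Lookup in ReplaceRegistry is keyed by the URL's host (or, for curated-packages ECR hosts, by the regex key), and only the host portion of the original string is rewritten. A minimal sketch, mirroring mappings from the tests that follow:

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/registrymirror"
)

func main() {
	r := &registrymirror.RegistryMirror{
		BaseRegistry: "harbor.eksa.demo:30003",
		NamespacedRegistryMap: map[string]string{
			constants.DefaultCoreEKSARegistry: "harbor.eksa.demo:30003/eks-anywhere",
		},
	}
	// Only the host is replaced; the image path and tag are preserved.
	fmt.Println(r.ReplaceRegistry("public.ecr.aws/product/image:tag"))
	// harbor.eksa.demo:30003/eks-anywhere/product/image:tag
}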
eks-anywhere | aws | Go | package registrymirror_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/registrymirror"
)
func TestFromCluster(t *testing.T) {
tests := []struct {
name string
cluster *v1alpha1.Cluster
want *registrymirror.RegistryMirror
}{
{
name: "with registry mirror",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
},
},
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443",
},
},
},
{
name: "with registry mirror and namespace",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
{
Registry: "783794618700.dkr,ecr.us-west-2.amazonaws.com",
Namespace: "curated-packages",
},
},
},
},
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/eks-anywhere",
constants.DefaultCuratedPackagesRegistryRegex: "1.2.3.4:443/curated-packages",
},
},
},
{
name: "with registry mirror and public.ecr.aws only",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
},
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/eks-anywhere",
},
},
},
{
name: "with registry mirror ca and auth",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
Authenticate: true,
CACertContent: "xyz",
},
},
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443",
},
Auth: true,
CACertContent: "xyz",
},
},
{
name: "without registry mirror",
cluster: &v1alpha1.Cluster{},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
result := registrymirror.FromCluster(tt.cluster)
if tt.want == nil {
g.Expect(result).To(BeNil())
} else {
g.Expect(result.BaseRegistry).To(Equal(tt.want.BaseRegistry))
g.Expect(len(result.NamespacedRegistryMap)).To(Equal(len(tt.want.NamespacedRegistryMap)))
for k, v := range tt.want.NamespacedRegistryMap {
g.Expect(result.NamespacedRegistryMap).Should(HaveKeyWithValue(k, v))
}
}
})
}
}
func TestFromClusterRegistryMirrorConfiguration(t *testing.T) {
testCases := []struct {
testName string
config *v1alpha1.RegistryMirrorConfiguration
want *registrymirror.RegistryMirror
}{
{
testName: "empty config",
config: nil,
want: nil,
},
{
testName: "no OCINamespaces",
config: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "harbor.eksa.demo",
Port: "30003",
OCINamespaces: nil,
Authenticate: true,
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "harbor.eksa.demo:30003",
},
Auth: true,
},
},
{
testName: "namespace for both eksa and curated packages",
config: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "harbor.eksa.demo",
Port: "30003",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
{
Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com",
Namespace: "curated-packages",
},
},
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "harbor.eksa.demo:30003/eks-anywhere",
constants.DefaultCuratedPackagesRegistryRegex: "harbor.eksa.demo:30003/curated-packages",
},
Auth: false,
},
},
{
testName: "namespace for eksa only",
config: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "harbor.eksa.demo",
Port: "30003",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "",
},
},
},
want: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "harbor.eksa.demo:30003",
},
Auth: false,
},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
result := registrymirror.FromClusterRegistryMirrorConfiguration(tt.config)
if tt.want == nil {
g.Expect(result).To(BeNil())
} else {
g.Expect(result.BaseRegistry).To(Equal(tt.want.BaseRegistry))
g.Expect(len(result.NamespacedRegistryMap)).To(Equal(len(tt.want.NamespacedRegistryMap)))
for k, v := range tt.want.NamespacedRegistryMap {
g.Expect(result.NamespacedRegistryMap).Should(HaveKeyWithValue(k, v))
}
g.Expect(result.Auth).To(Equal(tt.want.Auth))
}
})
}
}
func TestCoreEKSAMirror(t *testing.T) {
testCases := []struct {
testName string
registryMirror *registrymirror.RegistryMirror
want string
}{
{
testName: "with namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/eks-anywhere",
},
},
want: "1.2.3.4:443/eks-anywhere",
},
{
testName: "without namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443",
},
},
want: "1.2.3.4:443",
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.registryMirror.CoreEKSAMirror()).To(Equal(tt.want))
})
}
}
func TestCuratedPackagesMirror(t *testing.T) {
testCases := []struct {
testName string
registryMirror *registrymirror.RegistryMirror
want string
}{
{
testName: "with namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCuratedPackagesRegistryRegex: "1.2.3.4:443/curated-packages",
},
},
want: "1.2.3.4:443/curated-packages",
},
{
testName: "no required namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/eks-anywhere",
},
},
want: "",
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.registryMirror.CuratedPackagesMirror()).To(Equal(tt.want))
})
}
}
func TestReplaceRegistry(t *testing.T) {
tests := []struct {
name string
registryMirror *registrymirror.RegistryMirror
URL string
want string
}{
{
name: "oci url without registry mirror",
registryMirror: nil,
URL: "oci://public.ecr.aws/product/chart",
want: "oci://public.ecr.aws/product/chart",
},
{
name: "oci url without namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCuratedPackagesRegistryRegex: "harbor.eksa.demo:30003/curated-packages",
},
},
URL: "oci://public.ecr.aws/product/chart",
want: "oci://public.ecr.aws/product/chart",
},
{
name: "oci url with namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "harbor.eksa.demo:30003/eks-anywhere",
},
},
URL: "oci://public.ecr.aws/product/chart",
want: "oci://harbor.eksa.demo:30003/eks-anywhere/product/chart",
},
{
name: "https url without registry mirror",
registryMirror: nil,
URL: "https://public.ecr.aws/product/site",
want: "https://public.ecr.aws/product/site",
},
{
name: "https url without namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
},
URL: "https://public.ecr.aws/product/site",
want: "https://public.ecr.aws/product/site",
},
{
name: "https url with namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCuratedPackagesRegistryRegex: "harbor.eksa.demo:30003/curated-packages",
},
},
URL: "https://783794618700.dkr,ecr.us-west-2.amazonaws.com/product/site",
want: "https://harbor.eksa.demo:30003/curated-packages/product/site",
},
{
name: "container image without registry mirror",
registryMirror: nil,
URL: "public.ecr.aws/product/image:tag",
want: "public.ecr.aws/product/image:tag",
},
{
name: "container image without namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCuratedPackagesRegistryRegex: "harbor.eksa.demo:30003/curated-packages",
},
},
URL: "public.ecr.aws/product/image:tag",
want: "public.ecr.aws/product/image:tag",
},
{
name: "container image without namespace",
registryMirror: &registrymirror.RegistryMirror{
BaseRegistry: "harbor.eksa.demo:30003",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "harbor.eksa.demo:30003/eks-anywhere",
},
},
URL: "public.ecr.aws/product/image:tag",
want: "harbor.eksa.demo:30003/eks-anywhere/product/image:tag",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.registryMirror.ReplaceRegistry(tt.URL)).To(Equal(tt.want))
})
}
}
| 388 |
eks-anywhere | aws | Go | package containerd
import (
"net/url"
"path/filepath"
"strings"
)
// ToAPIEndpoint turns a URL into a valid API endpoint used in
// a containerd config file for a local registry.
// The original input is returned for malformed inputs.
func ToAPIEndpoint(url string) string {
u, err := parseURL(url)
if err != nil {
return url
}
if u.Path != "" {
u.Path = filepath.Join("v2", u.Path)
}
return strings.TrimPrefix(u.String(), "//")
}
func parseURL(in string) (*url.URL, error) {
urlIn := in
if !strings.Contains(in, "//") {
urlIn = "//" + in
}
return url.Parse(urlIn)
}
// ToAPIEndpoints utilizes ToAPIEndpoint to turn all URLs from a
// map to valid API endpoints for a local registry.
func ToAPIEndpoints(URLs map[string]string) map[string]string {
endpoints := make(map[string]string)
for key, url := range URLs {
endpoints[key] = ToAPIEndpoint(url)
}
return endpoints
}
| 40 |
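ToAPIEndpoint leaves a bare mirror address untouched and splices the registry /v2 API prefix in front of any namespace path, as the tests below confirm. A short sketch of both behaviors:

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
)

func main() {
	// No namespace path: returned unchanged.
	fmt.Println(containerd.ToAPIEndpoint("registry-mirror.test:443"))
	// registry-mirror.test:443

	// Namespace path: /v2 is inserted before it.
	fmt.Println(containerd.ToAPIEndpoint("registry-mirror.test:443/namespace"))
	// registry-mirror.test:443/v2/namespace
}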
eks-anywhere | aws | Go | package containerd_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
)
func TestToAPIEndpoint(t *testing.T) {
tests := []struct {
name string
URL string
want string
}{
{
name: "no namespace",
URL: "oci://1.2.3.4:443",
want: "oci://1.2.3.4:443",
},
{
name: "no namespace",
URL: "registry-mirror.test:443",
want: "registry-mirror.test:443",
},
{
name: "with namespace",
URL: "oci://1.2.3.4:443/namespace",
want: "oci://1.2.3.4:443/v2/namespace",
},
{
name: "with namespace",
URL: "registry-mirror.test:443/namespace",
want: "registry-mirror.test:443/v2/namespace",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(containerd.ToAPIEndpoint(tt.URL)).To(Equal(tt.want))
})
}
}
func TestToAPIEndpoints(t *testing.T) {
tests := []struct {
name string
URLs map[string]string
want map[string]string
}{
{
name: "mix",
URLs: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443",
constants.DefaultCuratedPackagesRegistryRegex: "1.2.3.4:443/curated-packages",
},
want: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443",
constants.DefaultCuratedPackagesRegistryRegex: "1.2.3.4:443/v2/curated-packages",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
result := containerd.ToAPIEndpoints(tt.URLs)
g.Expect(len(result)).To(Equal(len(tt.want)))
for k, v := range tt.want {
g.Expect(result).Should(HaveKeyWithValue(k, v))
}
})
}
}
| 76 |
eks-anywhere | aws | Go | package retrier
import (
"math"
"time"
"github.com/aws/eks-anywhere/pkg/logger"
)
type Retrier struct {
retryPolicy RetryPolicy
timeout time.Duration
backoffFactor *float32
}
type (
// RetryPolicy allows customizing the retry logic. The boolean retry indicates whether a new retry
// should be performed, and the wait duration indicates the wait time before the next retry.
RetryPolicy func(totalRetries int, err error) (retry bool, wait time.Duration)
RetrierOpt func(*Retrier)
)
// New creates a new retrier with a global timeout (the max time allowed for the whole execution).
// The default retry policy is to always retry with no wait time between retries.
func New(timeout time.Duration, opts ...RetrierOpt) *Retrier {
r := &Retrier{
timeout: timeout,
retryPolicy: zeroWaitPolicy,
}
for _, o := range opts {
o(r)
}
return r
}
// NewWithMaxRetries creates a new retrier with no global timeout and a max retries policy.
func NewWithMaxRetries(maxRetries int, backOffPeriod time.Duration) *Retrier {
// this value is roughly 292 years, so in practice there is no timeout
return New(time.Duration(math.MaxInt64), WithMaxRetries(maxRetries, backOffPeriod))
}
// NewWithNoTimeout creates a new retrier with no global timeout and infinite retries.
func NewWithNoTimeout() *Retrier {
return New(time.Duration(math.MaxInt64))
}
// WithMaxRetries sets a retry policy that will retry up to maxRetries times
// with a wait time between retries of backOffPeriod.
func WithMaxRetries(maxRetries int, backOffPeriod time.Duration) RetrierOpt {
return func(r *Retrier) {
r.retryPolicy = maxRetriesPolicy(maxRetries, backOffPeriod)
}
}
func WithBackoffFactor(factor float32) RetrierOpt {
return func(r *Retrier) {
r.backoffFactor = &factor
}
}
func WithRetryPolicy(policy RetryPolicy) RetrierOpt {
return func(r *Retrier) {
r.retryPolicy = policy
}
}
// Retry runs the fn function until it either completes successfully (no error),
// the configured timeout is reached, or the retry policy aborts the execution.
func (r *Retrier) Retry(fn func() error) error {
// While it seems aberrant to call a method with a nil receiver, several unit tests actually do.
// A previous version of this module, which didn't dereference the receiver until after the wrapped
// function failed, let those tests pass. The logging below of the receiver's key parameters broke
// them, so the conditional block restores the original nil-receiver behavior.
if r == nil {
return fn()
}
start := time.Now()
retries := 0
var err error
logger.V(5).Info("Retrier:", "timeout", r.timeout, "backoffFactor", r.backoffFactor)
for retry := true; retry; retry = time.Since(start) < r.timeout {
err = fn()
retries += 1
if err == nil {
logger.V(5).Info("Retry execution successful", "retries", retries, "duration", time.Since(start))
return nil
}
logger.V(5).Info("Error happened during retry", "error", err, "retries", retries)
retry, wait := r.retryPolicy(retries, err)
if !retry {
logger.V(5).Info("Execution aborted by retry policy")
return err
}
if r.backoffFactor != nil {
wait = time.Duration(float32(wait) * (*r.backoffFactor * float32(retries)))
}
// If there's not enough time left for the policy-proposed wait, there's no value in waiting that duration
// before quitting at the bottom of the loop. Just do it now.
retrierTimeoutTime := start.Add(r.timeout)
policyTimeoutTime := time.Now().Add(wait)
if retrierTimeoutTime.Before(policyTimeoutTime) {
break
}
logger.V(5).Info("Sleeping before next retry", "time", wait)
time.Sleep(wait)
}
logger.V(5).Info("Timeout reached. Returning error", "retries", retries, "duration", time.Since(start), "error", err)
return err
}
// Retry runs fn with a MaxRetriesPolicy.
func Retry(maxRetries int, backOffPeriod time.Duration, fn func() error) error {
r := NewWithMaxRetries(maxRetries, backOffPeriod)
return r.Retry(fn)
}
func zeroWaitPolicy(_ int, _ error) (retry bool, wait time.Duration) {
return true, 0
}
func maxRetriesPolicy(maxRetries int, backOffPeriod time.Duration) RetryPolicy {
return func(totalRetries int, _ error) (retry bool, wait time.Duration) {
return totalRetries < maxRetries, backOffPeriod
}
}
| 133 |
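A minimal sketch of the package-level Retry helper above: the callback is retried with a fixed backoff until it succeeds or the retry budget runs out.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/aws/eks-anywhere/pkg/retrier"
)

func main() {
	attempts := 0
	// Up to 5 attempts, waiting 10ms between them.
	err := retrier.Retry(5, 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(err, attempts) // <nil> 3
}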
eks-anywhere | aws | Go | package retrier_test
import (
"errors"
"testing"
"time"
"github.com/aws/eks-anywhere/pkg/retrier"
)
func TestNewWithMaxRetriesExhausted(t *testing.T) {
wantRetries := 10
r := retrier.NewWithMaxRetries(wantRetries, 0)
gotRetries := 0
fn := func() error {
gotRetries += 1
return errors.New("")
}
err := r.Retry(fn)
if err == nil {
t.Fatal("Retrier.Retry() error = nil, want not nil")
}
if gotRetries != wantRetries {
t.Fatalf("Wrong number of retries, got %d, want %d", gotRetries, wantRetries)
}
}
func TestNewWithMaxRetriesSuccessAfterRetries(t *testing.T) {
wantRetries := 5
r := retrier.NewWithMaxRetries(wantRetries, 0)
gotRetries := 0
fn := func() error {
gotRetries += 1
if wantRetries == gotRetries {
return nil
}
return errors.New("")
}
err := r.Retry(fn)
if err != nil {
t.Fatalf("Retrier.Retry() error = %v, want nil", err)
}
if gotRetries != wantRetries {
t.Fatalf("Wrong number of retries, got %d, want %d", gotRetries, wantRetries)
}
}
func TestNewWithNoTimeout(t *testing.T) {
r := retrier.NewWithNoTimeout()
fn := func() error {
return nil
}
err := r.Retry(fn)
if err != nil {
t.Fatalf("Retrier.Retry() error = %v, want nil", err)
}
}
func TestRetry(t *testing.T) {
wantRetries := 5
gotRetries := 0
fn := func() error {
gotRetries += 1
if wantRetries == gotRetries {
return nil
}
return errors.New("")
}
err := retrier.Retry(wantRetries, 0, fn)
if err != nil {
t.Fatalf("Retry() error = %v, want nil", err)
}
if gotRetries != wantRetries {
t.Fatalf("Wrong number of retries, got %d, want %d", gotRetries, wantRetries)
}
}
func TestNewDefaultFinishByFn(t *testing.T) {
wantRetries := 5
r := retrier.New(10 * time.Second)
gotRetries := 0
fn := func() error {
gotRetries += 1
if wantRetries == gotRetries {
return nil
}
return errors.New("")
}
err := r.Retry(fn)
if err != nil {
t.Fatalf("Retrier.Retry() error = %v, want nil", err)
}
if gotRetries != wantRetries {
t.Fatalf("Wrong number of retries, got %d, want %d", gotRetries, wantRetries)
}
}
func TestNewDefaultFinishByTimeout(t *testing.T) {
wantRetries := 100
r := retrier.New(1 * time.Microsecond)
gotRetries := 0
fn := func() error {
gotRetries += 1
time.Sleep(2 * time.Microsecond)
if wantRetries == gotRetries {
return nil
}
return errors.New("")
}
err := r.Retry(fn)
if err == nil {
t.Fatal("Retrier.Retry() error = nil, want not nil")
}
if gotRetries == wantRetries {
t.Fatalf("Retries shouldn't have got to wantRetries, got and want %d", gotRetries)
}
}
func TestNewWithRetryPolicyFinishByTimeout(t *testing.T) {
wantRetries := 100
retryPolicy := func(totalRetries int, _ error) (bool, time.Duration) {
return true, (2 * time.Microsecond)
}
gotRetries := 0
fn := func() error {
gotRetries += 1
time.Sleep(2 * time.Microsecond)
if wantRetries == gotRetries {
return nil
}
return errors.New("")
}
r := retrier.New(1*time.Microsecond, retrier.WithRetryPolicy(retryPolicy))
if err := r.Retry(fn); err == nil {
t.Fatalf("Retrier.Retry() error = nil, want not nil. Got retries = %d", gotRetries)
}
if gotRetries == wantRetries {
t.Fatalf("Retries shouldn't have got to wantRetries, got and want %d", gotRetries)
}
}
func TestNewWithRetryPolicyFinishByPolicy(t *testing.T) {
wantRetries := 5
retryPolicy := func(totalRetries int, _ error) (bool, time.Duration) {
if totalRetries == wantRetries {
return false, 0
}
return true, 0
}
gotRetries := 0
fn := func() error {
gotRetries += 1
return errors.New("")
}
r := retrier.New(1*time.Second, retrier.WithRetryPolicy(retryPolicy))
if err := r.Retry(fn); err == nil {
t.Fatal("Retrier.Retry() error = nil, want not nil")
}
if gotRetries != wantRetries {
t.Fatalf("Wrong number of retries, got %d, want %d", gotRetries, wantRetries)
}
}
func TestRetrierWithNilReceiver(t *testing.T) {
var retrier *retrier.Retrier = nil // This seems improbable, but happens in some other unit tests.
expectedError := errors.New("my expected error")
retryable := func() error {
return expectedError
}
err := retrier.Retry(retryable)
if err == nil || err.Error() != expectedError.Error() {
t.Errorf("Retrier didn't correctly handle nil receiver")
}
}
| 200 |
eks-anywhere | aws | Go | package semver
import (
"fmt"
"regexp"
"strconv"
)
const semverRegex = `^v?(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`
var semverRegexp = regexp.MustCompile(semverRegex)
type Version struct {
Major, Minor, Patch uint64
Prerelease, Buildmetadata string
}
func New(version string) (*Version, error) {
matches := semverRegexp.FindStringSubmatch(version)
namedGroups := make(map[string]string, len(matches))
groupNames := semverRegexp.SubexpNames()
for i, value := range matches {
name := groupNames[i]
if name != "" {
namedGroups[name] = value
}
}
v := &Version{}
var err error
v.Major, err = strconv.ParseUint(namedGroups["major"], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid major version in semver %s: %v", version, err)
}
v.Minor, err = strconv.ParseUint(namedGroups["minor"], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid minor version in semver %s: %v", version, err)
}
v.Patch, err = strconv.ParseUint(namedGroups["patch"], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid patch version in semver %s: %v", version, err)
}
v.Prerelease = namedGroups["prerelease"]
v.Buildmetadata = namedGroups["buildmetadata"]
return v, nil
}
func (v *Version) SameMajor(v2 *Version) bool {
return v.Major == v2.Major
}
func (v *Version) SameMinor(v2 *Version) bool {
return v.SameMajor(v2) && v.Minor == v2.Minor
}
func (v *Version) SamePatch(v2 *Version) bool {
return v.SameMinor(v2) && v.Patch == v2.Patch
}
func (v *Version) SamePrerelease(v2 *Version) bool {
return v.SamePatch(v2) && v.Prerelease == v2.Prerelease
}
func (v *Version) Equal(v2 *Version) bool {
return v.SamePrerelease(v2) && v.Buildmetadata == v2.Buildmetadata
}
func (v *Version) GreaterThan(v2 *Version) bool {
return v.Compare(v2) == 1
}
func (v *Version) LessThan(v2 *Version) bool {
return v.Compare(v2) == -1
}
func (v *Version) Compare(v2 *Version) int {
if c := compare(v.Major, v2.Major); c != 0 {
return c
}
if c := compare(v.Minor, v2.Minor); c != 0 {
return c
}
if c := compare(v.Patch, v2.Patch); c != 0 {
return c
}
return 0
}
func (v *Version) String() string {
return fmt.Sprintf("v%d.%d.%d", v.Major, v.Minor, v.Patch)
}
func compare(i, i2 uint64) int {
if i > i2 {
return 1
} else if i < i2 {
return -1
}
return 0
}
| 104 |
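A minimal sketch of parsing and comparing versions with this package; note that Compare orders by major/minor/patch only, ignoring prerelease and build metadata:

package main

import (
	"fmt"
	"log"

	"github.com/aws/eks-anywhere/pkg/semver"
)

func main() {
	v1, err := semver.New("v1.2.3") // leading "v" is optional per the regex
	if err != nil {
		log.Fatal(err)
	}
	v2, err := semver.New("2.0.0-rc.1+build.123")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v1.LessThan(v2))  // true: major 1 < major 2
	fmt.Println(v1.SameMajor(v2)) // false
}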
eks-anywhere | aws | Go | package semver_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/semver"
)
func TestNewError(t *testing.T) {
testCases := []struct {
testName string
version string
}{
{
testName: "empty",
version: "",
},
{
testName: "only letters",
version: "xxx",
},
{
testName: "only mayor",
version: "11",
},
{
testName: "no patch",
version: "11.1",
},
{
testName: "dot after patch",
version: "11.1.1.1",
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
if _, err := semver.New(tt.version); err == nil {
t.Fatalf("semver.New(%s) err = nil, want err not nil", tt.version)
}
})
}
}
func TestNewSuccess(t *testing.T) {
testCases := []struct {
testName string
version string
want *semver.Version
}{
{
testName: "only patch",
version: "0.0.4",
want: &semver.Version{
Major: 0,
Minor: 0,
Patch: 4,
},
},
{
testName: "only patch with double digit numbers",
version: "10.20.30",
want: &semver.Version{
Major: 10,
Minor: 20,
Patch: 30,
},
},
{
testName: "prerelease and meta",
version: "1.1.2-prerelease+meta",
want: &semver.Version{
Major: 1,
Minor: 1,
Patch: 2,
Prerelease: "prerelease",
Buildmetadata: "meta",
},
},
{
testName: "only meta with hyphen",
version: "1.1.2+meta-valid",
want: &semver.Version{
Major: 1,
Minor: 1,
Patch: 2,
Buildmetadata: "meta-valid",
},
},
{
testName: "prerelease and build with dots",
version: "2.0.0-rc.1+build.123",
want: &semver.Version{
Major: 2,
Minor: 0,
Patch: 0,
Prerelease: "rc.1",
Buildmetadata: "build.123",
},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
if got, err := semver.New(tt.version); err != nil {
t.Fatalf("semver.New(%s) err = %s, want err = nil", tt.version, err)
} else if !got.Equal(tt.want) {
t.Fatalf("semver.New(%s) semver = %v, want %v", tt.version, got, tt.want)
}
})
}
}
func TestCompareSuccess(t *testing.T) {
testCases := []struct {
testName string
v1 *semver.Version
v2 *semver.Version
want int
}{
{
testName: "equal",
v1: &semver.Version{
Major: 1,
Minor: 0,
Patch: 4,
},
v2: &semver.Version{
Major: 1,
Minor: 0,
Patch: 4,
},
want: 0,
},
{
testName: "less than",
v1: &semver.Version{
Major: 1,
Minor: 0,
Patch: 3,
},
v2: &semver.Version{
Major: 1,
Minor: 0,
Patch: 4,
},
want: -1,
},
{
testName: "less than, diff major",
v1: &semver.Version{
Major: 1,
Minor: 2,
Patch: 3,
},
v2: &semver.Version{
Major: 2,
Minor: 0,
Patch: 4,
},
want: -1,
},
{
testName: "greater than",
v1: &semver.Version{
Major: 1,
Minor: 0,
Patch: 5,
},
v2: &semver.Version{
Major: 1,
Minor: 0,
Patch: 4,
},
want: 1,
},
{
testName: "greater than, diff major",
v1: &semver.Version{
Major: 2,
Minor: 1,
Patch: 3,
},
v2: &semver.Version{
Major: 1,
Minor: 2,
Patch: 4,
},
want: 1,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
got := tt.v1.Compare(tt.v2)
if got != tt.want {
t.Fatalf("semver.Compare() got = %v, want %v", got, tt.want)
}
})
}
}
| 201 |
eks-anywhere | aws | Go | package tar
import (
"compress/gzip"
"fmt"
"os"
)
func GzipTarFolder(sourceFolder, dstFile string) error {
tarfile, err := os.Create(dstFile)
if err != nil {
return fmt.Errorf("creating dst tar file: %v", err)
}
defer tarfile.Close()
gw := gzip.NewWriter(tarfile)
defer gw.Close()
if err := tarFolderToWriter(sourceFolder, gw); err != nil {
return fmt.Errorf("gzip taring folder [%s] to [%s]: %v", sourceFolder, dstFile, err)
}
return nil
}
func UnGzipTarFile(tarFile, dstFolder string) error {
tarball, err := os.Open(tarFile)
if err != nil {
return err
}
defer tarball.Close()
gr, err := gzip.NewReader(tarball)
if err != nil {
return err
}
defer gr.Close()
return Untar(gr, NewFolderRouter(dstFolder))
}
| 40 |
eks-anywhere | aws | Go | package tar
type GzipPackager struct{}
func NewGzipPackager() GzipPackager {
return GzipPackager{}
}
func (GzipPackager) Package(sourceFolder, dstFile string) error {
return GzipTarFolder(sourceFolder, dstFile)
}
func (GzipPackager) UnPackage(orgFile, dstFolder string) error {
return UnGzipTarFile(orgFile, dstFolder)
}
| 16 |
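Packager and GzipPackager expose the same Package/UnPackage method set, so callers can presumably program against a shared interface; a hypothetical declaration, not part of the package as shown:

package tar

// Packer is a hypothetical interface that both Packager and GzipPackager
// satisfy implicitly via their identical method sets.
type Packer interface {
	Package(sourceFolder, dstFile string) error
	UnPackage(orgFile, dstFolder string) error
}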
eks-anywhere | aws | Go | package tar_test
import (
"os"
"path/filepath"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/tar"
)
func TestUnGzipTarFile(t *testing.T) {
g := NewWithT(t)
dstFile := "dst.tar.gz"
untarFolder := "dst-untar"
g.Expect(os.MkdirAll(untarFolder, os.ModePerm)).To(Succeed())
t.Cleanup(func() {
os.Remove(dstFile)
os.RemoveAll(untarFolder)
})
g.Expect(tar.GzipTarFolder("testdata", dstFile)).To(Succeed())
g.Expect(dstFile).To(BeAnExistingFile())
g.Expect(tar.UnGzipTarFile(dstFile, untarFolder)).To(Succeed())
g.Expect(untarFolder).To(BeADirectory())
g.Expect(filepath.Join(untarFolder, "dummy1")).To(BeARegularFile())
g.Expect(filepath.Join(untarFolder, "dummy2")).To(BeARegularFile())
g.Expect(filepath.Join(untarFolder, "dummy3")).To(BeADirectory())
g.Expect(filepath.Join(untarFolder, "dummy3", "dummy4")).To(BeARegularFile())
}
| 33 |
eks-anywhere | aws | Go | package tar
type Packager struct{}
func NewPackager() Packager {
return Packager{}
}
func (Packager) Package(sourceFolder, dstFile string) error {
return TarFolder(sourceFolder, dstFile)
}
func (Packager) UnPackage(orgFile, dstFolder string) error {
return UntarFile(orgFile, dstFolder)
}
| 16 |
eks-anywhere | aws | Go | package tar
import (
"archive/tar"
"path/filepath"
)
// Router instructs where to extract a file.
type Router interface {
// ExtractPath returns the path where a file should be extracted.
// An empty string instructs the extractor to skip the file.
ExtractPath(header *tar.Header) string
}
type FolderRouter struct {
folder string
}
func NewFolderRouter(folder string) FolderRouter {
return FolderRouter{folder: folder}
}
func (f FolderRouter) ExtractPath(header *tar.Header) string {
return filepath.Join(f.folder, header.Name)
}
| 26 |
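Because an empty ExtractPath return skips an entry, a custom Router can filter what gets extracted. A hypothetical example that only extracts entries under a given name prefix:

package tar

import (
	"archive/tar"
	"path/filepath"
	"strings"
)

// PrefixRouter is a hypothetical Router that extracts only entries whose
// names start with prefix, placing them under folder.
type PrefixRouter struct {
	folder, prefix string
}

func (p PrefixRouter) ExtractPath(header *tar.Header) string {
	if !strings.HasPrefix(header.Name, p.prefix) {
		return "" // empty string: skip this entry
	}
	return filepath.Join(p.folder, header.Name)
}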
eks-anywhere | aws | Go | package tar
import (
"archive/tar"
"fmt"
"io"
"os"
)
func TarFolder(sourceFolder, dstFile string) error {
tarfile, err := os.Create(dstFile)
if err != nil {
return fmt.Errorf("creating dst tar file: %v", err)
}
defer tarfile.Close()
if err := tarFolderToWriter(sourceFolder, tarfile); err != nil {
return fmt.Errorf("taring folder [%s] to [%s]: %v", sourceFolder, dstFile, err)
}
return nil
}
func tarFolderToWriter(sourceFolder string, dst io.Writer) error {
walker := NewFolderWalker(sourceFolder)
return Tar(walker, dst)
}
type TarFunc func(file string, info os.FileInfo, header *tar.Header) error
type Walker interface {
Walk(TarFunc) error
}
func Tar(source Walker, dst io.Writer) error {
tw := tar.NewWriter(dst)
defer tw.Close()
if err := source.Walk(addToTar(tw)); err != nil {
return fmt.Errorf("taring: %v", err)
}
return nil
}
func addToTar(tw *tar.Writer) TarFunc {
return func(file string, info os.FileInfo, header *tar.Header) error {
if err := tw.WriteHeader(header); err != nil {
return err
}
if info.IsDir() {
return nil
}
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
if _, err := io.Copy(tw, f); err != nil {
return err
}
return nil
}
}
| 69 |
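A short usage sketch, assuming the pkg/tar import path used by the tests: Tar accepts any io.Writer, so a FolderWalker can stream an archive into memory instead of a file; "some/folder" is a placeholder path.

package main

import (
	"bytes"
	"log"

	eksatar "github.com/aws/eks-anywhere/pkg/tar"
)

func main() {
	var buf bytes.Buffer
	// Walk the folder and stream its entries into an in-memory archive.
	if err := eksatar.Tar(eksatar.NewFolderWalker("some/folder"), &buf); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive size: %d bytes", buf.Len())
}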
eks-anywhere | aws | Go | package tar_test
import (
"os"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/tar"
)
func TestTarFolder(t *testing.T) {
dstFile := "dst.tar"
t.Cleanup(func() {
os.Remove(dstFile)
})
g := NewWithT(t)
g.Expect(tar.TarFolder("testdata", dstFile)).To(Succeed())
g.Expect(dstFile).To(BeAnExistingFile())
}
| 22 |
eks-anywhere | aws | Go | package tar
import (
"archive/tar"
"io"
"os"
)
func UntarFile(tarFile, dstFolder string) error {
reader, err := os.Open(tarFile)
if err != nil {
return err
}
defer reader.Close()
return Untar(reader, NewFolderRouter(dstFolder))
}
func Untar(source io.Reader, router Router) error {
tarReader := tar.NewReader(source)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
path := router.ExtractPath(header)
if path == "" {
continue
}
info := header.FileInfo()
if info.IsDir() {
if err = os.MkdirAll(path, info.Mode()); err != nil {
return err
}
continue
}
		file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
		if err != nil {
			return err
		}
		// Close inside the loop rather than deferring, so large archives don't
		// accumulate open file descriptors until the function returns.
		if _, err := io.Copy(file, tarReader); err != nil {
			file.Close()
			return err
		}
		if err := file.Close(); err != nil {
			return err
		}
}
return nil
}
| 56 |
eks-anywhere | aws | Go | package tar_test
import (
"os"
"path/filepath"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/tar"
)
func TestUntarFile(t *testing.T) {
g := NewWithT(t)
dstFile := "dst.tar"
untarFolder := "dst-untar"
	g.Expect(os.MkdirAll(untarFolder, os.ModePerm)).To(Succeed())
t.Cleanup(func() {
os.Remove(dstFile)
os.RemoveAll(untarFolder)
})
g.Expect(tar.TarFolder("testdata", dstFile)).To(Succeed())
g.Expect(dstFile).To(BeAnExistingFile())
g.Expect(tar.UntarFile(dstFile, untarFolder)).To(Succeed())
g.Expect(untarFolder).To(BeADirectory())
g.Expect(filepath.Join(untarFolder, "dummy1")).To(BeARegularFile())
g.Expect(filepath.Join(untarFolder, "dummy2")).To(BeARegularFile())
g.Expect(filepath.Join(untarFolder, "dummy3")).To(BeADirectory())
g.Expect(filepath.Join(untarFolder, "dummy3", "dummy4")).To(BeARegularFile())
}
| 33 |
eks-anywhere | aws | Go | package tar
import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
)
func NewFolderWalker(folder string) FolderWalker {
return FolderWalker{
folder: folder,
folderPrefix: fmt.Sprintf("%s/", folder),
}
}
type FolderWalker struct {
folder, folderPrefix string
}
func (f FolderWalker) Walk(fn TarFunc) error {
return filepath.Walk(f.folder, f.trimFolder(fn))
}
func (f FolderWalker) trimFolder(fn TarFunc) filepath.WalkFunc {
return func(file string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if file == f.folder {
return nil
}
header, err := tar.FileInfoHeader(info, info.Name())
if err != nil {
return err
}
header.Name = strings.TrimPrefix(file, f.folderPrefix)
return fn(file, info, header)
}
}
| 45 |
eks-anywhere | aws | Go | package task
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces"
)
// Task is a logical unit of work; each concrete task implements this interface.
type Task interface {
Run(ctx context.Context, commandContext *CommandContext) Task
Name() string
Checkpoint() *CompletedTask
Restore(ctx context.Context, commandContext *CommandContext, completedTask *CompletedTask) (Task, error)
}
// CommandContext maintains the mutable and shared entities used across tasks.
type CommandContext struct {
Bootstrapper interfaces.Bootstrapper
Provider providers.Provider
ClusterManager interfaces.ClusterManager
GitOpsManager interfaces.GitOpsManager
Validations interfaces.Validator
Writer filewriter.FileWriter
EksdInstaller interfaces.EksdInstaller
PackageInstaller interfaces.PackageInstaller
EksdUpgrader interfaces.EksdUpgrader
ClusterUpgrader interfaces.ClusterUpgrader
CAPIManager interfaces.CAPIManager
ClusterSpec *cluster.Spec
CurrentClusterSpec *cluster.Spec
UpgradeChangeDiff *types.ChangeDiff
BootstrapCluster *types.Cluster
ManagementCluster *types.Cluster
WorkloadCluster *types.Cluster
Profiler *Profiler
OriginalError error
ManagementClusterStateDir string
ForceCleanup bool
}
func (c *CommandContext) SetError(err error) {
if c.OriginalError == nil {
c.OriginalError = err
}
}
type Profiler struct {
metrics map[string]map[string]time.Duration
starts map[string]map[string]time.Time
}
// SetStartTask records the start time of a task.
func (pp *Profiler) SetStartTask(taskName string) {
pp.SetStart(taskName, taskName)
}
// SetStart records a start time under a task; it can be used to profile subtasks.
func (pp *Profiler) SetStart(taskName string, msg string) {
if _, ok := pp.starts[taskName]; !ok {
pp.starts[taskName] = map[string]time.Time{}
}
pp.starts[taskName][msg] = time.Now()
}
// MarkDoneTask records a task's duration; it must be called after SetStartTask.
func (pp *Profiler) MarkDoneTask(taskName string) {
pp.MarkDone(taskName, taskName)
}
// MarkDone records a subtask's duration; it must be called after SetStart.
func (pp *Profiler) MarkDone(taskName string, msg string) {
if _, ok := pp.metrics[taskName]; !ok {
pp.metrics[taskName] = map[string]time.Duration{}
}
if start, ok := pp.starts[taskName][msg]; ok {
pp.metrics[taskName][msg] = time.Since(start)
}
}
// Metrics returns the collected task and subtask durations.
func (pp *Profiler) Metrics() map[string]map[string]time.Duration {
return pp.metrics
}
// logProfileSummary writes debug logs for a task's metrics.
func (pp *Profiler) logProfileSummary(taskName string) {
if durationMap, ok := pp.metrics[taskName]; ok {
for k, v := range durationMap {
if k != taskName {
logger.V(4).Info("Subtask finished", "task_name", taskName, "subtask_name", k, "duration", v)
}
}
if totalTaskDuration, ok := durationMap[taskName]; ok {
logger.V(4).Info("Task finished", "task_name", taskName, "duration", totalTaskDuration)
logger.V(4).Info("----------------------------------")
}
}
}
// taskRunner manages Task execution.
type taskRunner struct {
task Task
writer filewriter.FileWriter
withCheckpoint bool
}
type TaskRunnerOpt func(*taskRunner)
func WithCheckpointFile() TaskRunnerOpt {
return func(t *taskRunner) {
logger.V(4).Info("Checkpoint feature enabled")
t.withCheckpoint = true
}
}
func (tr *taskRunner) RunTask(ctx context.Context, commandContext *CommandContext) error {
checkpointFileName := fmt.Sprintf("%s-checkpoint.yaml", commandContext.ClusterSpec.Cluster.Name)
var checkpointInfo CheckpointInfo
var err error
commandContext.ManagementClusterStateDir = fmt.Sprintf("cluster-state-backup-%s", time.Now().Format("2006-01-02T15_04_05"))
commandContext.Profiler = &Profiler{
metrics: make(map[string]map[string]time.Duration),
starts: make(map[string]map[string]time.Time),
}
task := tr.task
start := time.Now()
defer taskRunnerFinalBlock(start)
checkpointInfo, err = tr.setupCheckpointInfo(commandContext, checkpointFileName)
if err != nil {
return err
}
for task != nil {
if completedTask, ok := checkpointInfo.CompletedTasks[task.Name()]; ok {
logger.V(4).Info("Restoring task", "task_name", task.Name())
nextTask, err := task.Restore(ctx, commandContext, completedTask)
if err != nil {
return fmt.Errorf("restoring checkpoint info: %v", err)
}
task = nextTask
continue
}
logger.V(4).Info("Task start", "task_name", task.Name())
commandContext.Profiler.SetStartTask(task.Name())
nextTask := task.Run(ctx, commandContext)
commandContext.Profiler.MarkDoneTask(task.Name())
commandContext.Profiler.logProfileSummary(task.Name())
if commandContext.OriginalError == nil {
checkpointInfo.taskCompleted(task.Name(), task.Checkpoint())
}
task = nextTask
}
if commandContext.OriginalError != nil {
if err := tr.saveCheckpoint(checkpointInfo, checkpointFileName); err != nil {
return err
}
}
return commandContext.OriginalError
}
func taskRunnerFinalBlock(startTime time.Time) {
logger.V(4).Info("Tasks completed", "duration", time.Since(startTime))
}
func NewTaskRunner(task Task, writer filewriter.FileWriter, opts ...TaskRunnerOpt) *taskRunner {
t := &taskRunner{
task: task,
writer: writer,
}
for _, o := range opts {
o(t)
}
return t
}
func (tr *taskRunner) saveCheckpoint(checkpointInfo CheckpointInfo, filename string) error {
logger.V(4).Info("Saving checkpoint", "file", filename)
content, err := yaml.Marshal(checkpointInfo)
if err != nil {
return fmt.Errorf("saving task runner checkpoint: %v\n", err)
}
if _, err = tr.writer.Write(filename, content); err != nil {
return fmt.Errorf("saving task runner checkpoint: %v\n", err)
}
return nil
}
func (tr *taskRunner) setupCheckpointInfo(commandContext *CommandContext, checkpointFileName string) (CheckpointInfo, error) {
checkpointInfo := newCheckpointInfo()
if tr.withCheckpoint {
checkpointFilePath := filepath.Join(commandContext.Writer.TempDir(), checkpointFileName)
if _, err := os.Stat(checkpointFilePath); err == nil {
checkpointFile, err := readCheckpointFile(checkpointFilePath)
if err != nil {
return checkpointInfo, err
}
checkpointInfo.CompletedTasks = checkpointFile.CompletedTasks
}
}
return checkpointInfo, nil
}
type TaskCheckpoint interface{}
type CheckpointInfo struct {
CompletedTasks map[string]*CompletedTask `json:"completedTasks"`
}
type CompletedTask struct {
Checkpoint TaskCheckpoint `json:"checkpoint"`
}
func newCheckpointInfo() CheckpointInfo {
return CheckpointInfo{
CompletedTasks: make(map[string]*CompletedTask),
}
}
func (c CheckpointInfo) taskCompleted(name string, completedTask *CompletedTask) {
c.CompletedTasks[name] = completedTask
}
func readCheckpointFile(file string) (*CheckpointInfo, error) {
logger.V(4).Info("Reading checkpoint", "file", file)
content, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("failed reading checkpoint file: %v\n", err)
}
checkpointInfo := &CheckpointInfo{}
err = yaml.Unmarshal(content, checkpointInfo)
if err != nil {
return nil, fmt.Errorf("failed unmarshalling checkpoint: %v\n", err)
}
return checkpointInfo, nil
}
/*
UnmarshalTaskCheckpoint marshals the received task checkpoint (an interface{} value) and then
unmarshals it into the concrete type requested by the Restore() method.
When reading from a YAML file, Go offers no direct type conversion from interface{} to the
desired type. We use interface{} because the TaskCheckpoint type varies depending on what a
specific task needs; the known workaround is this marshal and unmarshal round trip.
*/
func UnmarshalTaskCheckpoint(taskCheckpoint TaskCheckpoint, config TaskCheckpoint) error {
checkpointYaml, err := yaml.Marshal(taskCheckpoint)
if err != nil {
		return err
}
return yaml.Unmarshal(checkpointYaml, config)
}
| 269 |
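A hedged sketch of the Restore side of checkpointing, assuming the task and types packages above; the workflows package name and workloadTask type are hypothetical, and the other Task methods are elided. It shows the marshal/unmarshal round trip that UnmarshalTaskCheckpoint performs to recover a concrete type from the generic checkpoint value.

package workflows

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/task"
	"github.com/aws/eks-anywhere/pkg/types"
)

// workloadTask is a hypothetical Task; Run, Name and Checkpoint are elided.
type workloadTask struct{}

func (t *workloadTask) Restore(ctx context.Context, commandContext *task.CommandContext, completed *task.CompletedTask) (task.Task, error) {
	cluster := &types.Cluster{}
	// The checkpoint was read back from YAML as a generic value; round-trip
	// it into the concrete type this task originally saved.
	if err := task.UnmarshalTaskCheckpoint(completed.Checkpoint, cluster); err != nil {
		return nil, err
	}
	commandContext.WorkloadCluster = cluster
	return nil, nil // a nil next task ends the run
}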
eks-anywhere | aws | Go | package task_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/features"
writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/task"
mocktasks "github.com/aws/eks-anywhere/pkg/task/mocks"
"github.com/aws/eks-anywhere/pkg/types"
)
func TestTaskRunnerRunTask(t *testing.T) {
tr := newTaskRunnerTest(t)
tr.taskA.EXPECT().Run(tr.ctx, tr.cmdContext).Return(tr.taskB).Times(1)
tr.taskA.EXPECT().Name().Return("taskA").Times(7)
tr.taskA.EXPECT().Checkpoint()
tr.taskB.EXPECT().Run(tr.ctx, tr.cmdContext).Return(tr.taskC).Times(1)
tr.taskB.EXPECT().Name().Return("taskB").Times(7)
tr.taskB.EXPECT().Checkpoint()
tr.taskC.EXPECT().Run(tr.ctx, tr.cmdContext).Return(nil).Times(1)
tr.taskC.EXPECT().Name().Return("taskC").Times(7)
tr.taskC.EXPECT().Checkpoint()
type fields struct {
tasks []task.Task
}
tests := []struct {
name string
fields fields
}{
{
name: "Task runs and next Task is triggered and profiles are captured",
fields: fields{
tasks: []task.Task{tr.taskA, tr.taskB, tr.taskC},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
runner := task.NewTaskRunner(tt.fields.tasks[0], tr.writer)
if err := runner.RunTask(tr.ctx, tr.cmdContext); err != nil {
t.Fatal(err)
}
})
for _, task := range tt.fields.tasks {
if _, ok := tr.cmdContext.Profiler.Metrics()[task.Name()]; !ok {
t.Fatal("Error Profiler doesn't have metrics")
}
}
}
}
func TestTaskRunnerRunTaskWithCheckpointSecondRunSuccess(t *testing.T) {
tt := newTaskRunnerTest(t)
tt.taskA.EXPECT().Restore(tt.ctx, tt.cmdContext, gomock.Any()).Return(tt.taskB, nil)
tt.taskA.EXPECT().Name().Return("taskA").Times(2)
tt.taskB.EXPECT().Run(tt.ctx, tt.cmdContext).Return(tt.taskC).Times(1)
tt.taskB.EXPECT().Name().Return("taskB").Times(6)
tt.taskB.EXPECT().Checkpoint()
tt.taskC.EXPECT().Run(tt.ctx, tt.cmdContext).Return(nil).Times(1)
tt.taskC.EXPECT().Name().Return("taskC").Times(6)
tt.taskC.EXPECT().Checkpoint()
tt.writer.EXPECT().TempDir().Return("testdata")
tasks := []task.Task{tt.taskA, tt.taskB, tt.taskC}
t.Setenv(features.CheckpointEnabledEnvVar, "true")
runner := task.NewTaskRunner(tasks[0], tt.cmdContext.Writer, task.WithCheckpointFile())
if err := runner.RunTask(tt.ctx, tt.cmdContext); err != nil {
t.Fatal(err)
}
if err := os.Unsetenv(features.CheckpointEnabledEnvVar); err != nil {
t.Fatal(err)
}
}
func TestTaskRunnerRunTaskWithCheckpointFirstRunFailed(t *testing.T) {
tt := newTaskRunnerTest(t)
tt.cmdContext.OriginalError = fmt.Errorf("error")
tt.taskA.EXPECT().Run(tt.ctx, tt.cmdContext).Return(nil)
tt.taskA.EXPECT().Name().Return("taskA").Times(5)
tt.writer.EXPECT().TempDir()
tt.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", tt.cmdContext.ClusterSpec.Cluster.Name), gomock.Any())
tasks := []task.Task{tt.taskA, tt.taskB}
t.Setenv(features.CheckpointEnabledEnvVar, "true")
runner := task.NewTaskRunner(tasks[0], tt.cmdContext.Writer, task.WithCheckpointFile())
if err := runner.RunTask(tt.ctx, tt.cmdContext); err == nil {
t.Fatalf("Task.RunTask want err, got nil")
}
if err := os.Unsetenv(features.CheckpointEnabledEnvVar); err != nil {
t.Fatal(err)
}
}
func TestTaskRunnerRunTaskWithCheckpointSecondRunRestoreFailure(t *testing.T) {
tt := newTaskRunnerTest(t)
tt.taskA.EXPECT().Restore(tt.ctx, tt.cmdContext, gomock.Any()).Return(nil, fmt.Errorf("error"))
tt.taskA.EXPECT().Name().Return("taskA").Times(2)
tt.writer.EXPECT().TempDir().Return("testdata")
tasks := []task.Task{tt.taskA, tt.taskB, tt.taskC}
t.Setenv(features.CheckpointEnabledEnvVar, "true")
runner := task.NewTaskRunner(tasks[0], tt.cmdContext.Writer, task.WithCheckpointFile())
if err := runner.RunTask(tt.ctx, tt.cmdContext); err == nil {
t.Fatalf("Task.Restore want err, got nil")
}
if err := os.Unsetenv(features.CheckpointEnabledEnvVar); err != nil {
t.Fatal(err)
}
}
func TestTaskRunnerRunTaskWithCheckpointSaveFailed(t *testing.T) {
tt := newTaskRunnerTest(t)
tt.cmdContext.OriginalError = fmt.Errorf("error")
tt.taskA.EXPECT().Run(tt.ctx, tt.cmdContext).Return(nil)
tt.taskA.EXPECT().Name().Return("taskA").Times(5)
tt.writer.EXPECT().TempDir()
tt.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", tt.cmdContext.ClusterSpec.Cluster.Name), gomock.Any()).Return("", fmt.Errorf("error"))
tasks := []task.Task{tt.taskA, tt.taskB}
t.Setenv(features.CheckpointEnabledEnvVar, "true")
runner := task.NewTaskRunner(tasks[0], tt.cmdContext.Writer, task.WithCheckpointFile())
if err := runner.RunTask(tt.ctx, tt.cmdContext); err == nil {
t.Fatalf("Task.RunTask want err, got nil")
}
if err := os.Unsetenv(features.CheckpointEnabledEnvVar); err != nil {
t.Fatal(err)
}
}
func TestTaskRunnerRunTaskWithCheckpointReadFailure(t *testing.T) {
tt := newTaskRunnerTest(t)
tt.cmdContext.ClusterSpec.Cluster.Name = "invalid"
tt.writer.EXPECT().TempDir().Return("testdata")
tasks := []task.Task{tt.taskA, tt.taskB, tt.taskC}
t.Setenv(features.CheckpointEnabledEnvVar, "true")
runner := task.NewTaskRunner(tasks[0], tt.cmdContext.Writer, task.WithCheckpointFile())
if err := runner.RunTask(tt.ctx, tt.cmdContext); err == nil {
t.Fatalf("Task.ReadCheckpointFile want err, got nil")
}
if err := os.Unsetenv(features.CheckpointEnabledEnvVar); err != nil {
t.Fatal(err)
}
}
func TestUnmarshalTaskCheckpointSuccess(t *testing.T) {
testConfigType := types.Cluster{}
testTaskCheckpoint := types.Cluster{
Name: "test-cluster",
KubeconfigFile: "test.kubeconfig",
ExistingManagement: false,
}
if err := task.UnmarshalTaskCheckpoint(testTaskCheckpoint, testConfigType); err != nil {
t.Fatalf("task.UnmarshalTaskCheckpoint err = %v, want nil", err)
}
}
type taskRunnerTest struct {
ctx context.Context
cmdContext *task.CommandContext
taskA *mocktasks.MockTask
taskB *mocktasks.MockTask
taskC *mocktasks.MockTask
writer *writermocks.MockFileWriter
}
func newTaskRunnerTest(t *testing.T) *taskRunnerTest {
ctrl := gomock.NewController(t)
cmdContext := &task.CommandContext{
ClusterSpec: &cluster.Spec{
Config: &cluster.Config{
Cluster: &v1alpha1.Cluster{},
},
},
}
cmdContext.ClusterSpec.Cluster.Name = "test-cluster"
writer := writermocks.NewMockFileWriter(ctrl)
cmdContext.Writer = writer
cleanTaskA := mocktasks.NewMockTask(ctrl)
cleanTaskB := mocktasks.NewMockTask(ctrl)
cleanTaskC := mocktasks.NewMockTask(ctrl)
return &taskRunnerTest{
ctx: context.Background(),
cmdContext: cmdContext,
taskA: cleanTaskA,
taskB: cleanTaskB,
taskC: cleanTaskC,
writer: writer,
}
}
| 218 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/task (interfaces: Task)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
task "github.com/aws/eks-anywhere/pkg/task"
gomock "github.com/golang/mock/gomock"
)
// MockTask is a mock of Task interface.
type MockTask struct {
ctrl *gomock.Controller
recorder *MockTaskMockRecorder
}
// MockTaskMockRecorder is the mock recorder for MockTask.
type MockTaskMockRecorder struct {
mock *MockTask
}
// NewMockTask creates a new mock instance.
func NewMockTask(ctrl *gomock.Controller) *MockTask {
mock := &MockTask{ctrl: ctrl}
mock.recorder = &MockTaskMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockTask) EXPECT() *MockTaskMockRecorder {
return m.recorder
}
// Checkpoint mocks base method.
func (m *MockTask) Checkpoint() *task.CompletedTask {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Checkpoint")
ret0, _ := ret[0].(*task.CompletedTask)
return ret0
}
// Checkpoint indicates an expected call of Checkpoint.
func (mr *MockTaskMockRecorder) Checkpoint() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Checkpoint", reflect.TypeOf((*MockTask)(nil).Checkpoint))
}
// Name mocks base method.
func (m *MockTask) Name() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Name")
ret0, _ := ret[0].(string)
return ret0
}
// Name indicates an expected call of Name.
func (mr *MockTaskMockRecorder) Name() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockTask)(nil).Name))
}
// Restore mocks base method.
func (m *MockTask) Restore(arg0 context.Context, arg1 *task.CommandContext, arg2 *task.CompletedTask) (task.Task, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Restore", arg0, arg1, arg2)
ret0, _ := ret[0].(task.Task)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Restore indicates an expected call of Restore.
func (mr *MockTaskMockRecorder) Restore(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Restore", reflect.TypeOf((*MockTask)(nil).Restore), arg0, arg1, arg2)
}
// Run mocks base method.
func (m *MockTask) Run(arg0 context.Context, arg1 *task.CommandContext) task.Task {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Run", arg0, arg1)
ret0, _ := ret[0].(task.Task)
return ret0
}
// Run indicates an expected call of Run.
func (mr *MockTaskMockRecorder) Run(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockTask)(nil).Run), arg0, arg1)
}
| 94 |
eks-anywhere | aws | Go | package templater
import (
"reflect"
"strings"
"sigs.k8s.io/yaml"
)
type PartialYaml map[string]interface{}
func (p PartialYaml) AddIfNotZero(k string, v interface{}) {
if !isZeroVal(v) {
p[k] = v
}
}
func isZeroVal(x interface{}) bool {
return x == nil || reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())
}
func (p PartialYaml) ToYaml() (string, error) {
b, err := yaml.Marshal(p)
if err != nil {
return "", err
}
s := string(b)
s = strings.TrimSuffix(s, "\n")
return s, nil
}
| 32 |
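A minimal usage sketch of PartialYaml, assuming the templater import path used by the tests: zero values are dropped, so only populated keys appear in the output.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/templater"
)

func main() {
	p := templater.PartialYaml{}
	p.AddIfNotZero("replicas", 3)
	p.AddIfNotZero("nodeSelector", "") // zero value: key is omitted
	y, err := p.ToYaml()
	if err != nil {
		panic(err)
	}
	fmt.Println(y) // prints: replicas: 3
}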
eks-anywhere | aws | Go | package templater_test
import (
"reflect"
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/templater"
)
func TestPartialYamlAddIfNotZero(t *testing.T) {
tests := []struct {
testName string
p templater.PartialYaml
k string
v interface{}
wantAdded bool
wantV interface{}
}{
{
testName: "add string",
p: templater.PartialYaml{},
k: "key",
v: "value",
wantAdded: true,
wantV: "value",
},
{
testName: "add nil",
p: templater.PartialYaml{},
k: "key",
v: nil,
wantAdded: false,
wantV: nil,
},
{
testName: "add empty string",
p: templater.PartialYaml{},
k: "key",
v: "",
wantAdded: false,
wantV: nil,
},
{
testName: "add present string",
p: templater.PartialYaml{
"key": "value_old",
},
k: "key",
v: "value_new",
wantAdded: true,
wantV: "value_new",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tt.p.AddIfNotZero(tt.k, tt.v)
gotV, gotAdded := tt.p[tt.k]
if tt.wantAdded != gotAdded {
t.Errorf("PartialYaml.AddIfNotZero() wasAdded = %v, wantAdded %v", gotAdded, tt.wantAdded)
}
if !reflect.DeepEqual(gotV, tt.wantV) {
t.Errorf("PartialYaml.AddIfNotZero() gotValue = %v, wantValue %v", gotV, tt.wantV)
}
})
}
}
func TestPartialYamlToYaml(t *testing.T) {
tests := []struct {
testName string
p templater.PartialYaml
wantFile string
wantErr bool
}{
{
testName: "simple object",
p: templater.PartialYaml{
"key1": "value 1",
"key2": 2,
"key3": "value3",
},
wantFile: "testdata/partial_yaml_object_expected.yaml",
wantErr: false,
},
{
testName: "map",
p: templater.PartialYaml{
"key1": "value 1",
"key2": 2,
"key3": map[string]string{
"key_nest1": "value nest",
"key_nest2": "value nest 2",
},
"key4": map[string]interface{}{
"key_nest1": "value nest",
"key_nest2": 22,
},
},
wantFile: "testdata/partial_yaml_map_expected.yaml",
wantErr: false,
},
{
testName: "array",
p: templater.PartialYaml{
"key1": "value 1",
"key2": 2,
"key3": []string{"value array 1", "value array 2"},
"key4": []interface{}{
map[string]interface{}{
"key_in_nest_array": "value",
"key_in_nest_array_2": 22,
},
},
},
wantFile: "testdata/partial_yaml_array_expected.yaml",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
got, err := tt.p.ToYaml()
if (err != nil) != tt.wantErr {
t.Fatalf("PartialYaml.ToYaml() error = %v, wantErr %v", err, tt.wantErr)
}
test.AssertContentToFile(t, got, tt.wantFile)
})
}
}
| 132 |
eks-anywhere | aws | Go | package templater
import (
"bytes"
"fmt"
"strings"
"text/template"
"github.com/aws/eks-anywhere/pkg/filewriter"
)
type Templater struct {
writer filewriter.FileWriter
}
func New(writer filewriter.FileWriter) *Templater {
return &Templater{
writer: writer,
}
}
func (t *Templater) WriteToFile(templateContent string, data interface{}, fileName string, f ...filewriter.FileOptionsFunc) (filePath string, err error) {
bytes, err := Execute(templateContent, data)
if err != nil {
return "", err
}
writtenFilePath, err := t.writer.Write(fileName, bytes, f...)
if err != nil {
return "", fmt.Errorf("writing template file: %v", err)
}
return writtenFilePath, nil
}
func (t *Templater) WriteBytesToFile(content []byte, fileName string, f ...filewriter.FileOptionsFunc) (filePath string, err error) {
writtenFilePath, err := t.writer.Write(fileName, content, f...)
if err != nil {
return "", fmt.Errorf("writing template file: %v", err)
}
return writtenFilePath, nil
}
func Execute(templateContent string, data interface{}) ([]byte, error) {
temp := template.New("tmpl")
funcMap := map[string]interface{}{
"indent": func(spaces int, v string) string {
pad := strings.Repeat(" ", spaces)
return pad + strings.Replace(v, "\n", "\n"+pad, -1)
},
"stringsJoin": strings.Join,
}
temp = temp.Funcs(funcMap)
temp, err := temp.Parse(templateContent)
if err != nil {
return nil, fmt.Errorf("parsing template: %v", err)
}
var buf bytes.Buffer
err = temp.Execute(&buf, data)
if err != nil {
return nil, fmt.Errorf("substituting values for template: %v", err)
}
return buf.Bytes(), nil
}
| 67 |
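A small sketch of Execute and its built-in template funcs, assuming the templater import path used by the tests; the template text and data are illustrative. Note that piping into indent places the piped value as the last argument, so "indent 2" pads every line with two spaces.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/templater"
)

func main() {
	tmpl := `labels:
{{ .Labels | indent 2 }}
args: {{ stringsJoin .Args "," }}`
	data := map[string]interface{}{
		"Labels": "app: demo\ntier: web",
		"Args":   []string{"--v=4", "--logtostderr"},
	}
	out, err := templater.Execute(tmpl, data)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}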
eks-anywhere | aws | Go | package templater_test
import (
"os"
"strings"
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/templater"
)
func TestTemplaterWriteToFileSuccess(t *testing.T) {
type dataStruct struct {
Key1, Key2, Key3, KeyAndValue3 string
Conditional bool
}
tests := []struct {
testName string
templateFile string
data dataStruct
fileName string
wantFilePath string
wantErr bool
}{
{
testName: "with conditional true",
templateFile: "testdata/test1_template.yaml",
data: dataStruct{
Key1: "value_1",
Key2: "value_2",
Key3: "value_3",
Conditional: true,
},
fileName: "file_tmp.yaml",
wantFilePath: "testdata/test1_conditional_true_want.yaml",
wantErr: false,
},
{
testName: "with conditional false",
templateFile: "testdata/test1_template.yaml",
data: dataStruct{
Key1: "value_1",
Key2: "value_2",
Key3: "value_3",
Conditional: false,
},
fileName: "file_tmp.yaml",
wantFilePath: "testdata/test1_conditional_false_want.yaml",
wantErr: false,
},
{
testName: "with indent",
templateFile: "testdata/test_indent_template.yaml",
data: dataStruct{
Key1: "value_1",
Key2: "value_2",
KeyAndValue3: "key3: value_3",
Conditional: true,
},
fileName: "file_tmp.yaml",
wantFilePath: "testdata/test_indent_want.yaml",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
_, writer := test.NewWriter(t)
tr := templater.New(writer)
templateContent := test.ReadFile(t, tt.templateFile)
gotFilePath, err := tr.WriteToFile(templateContent, tt.data, tt.fileName)
if (err != nil) != tt.wantErr {
t.Fatalf("Templater.WriteToFile() error = %v, wantErr %v", err, tt.wantErr)
}
if !strings.HasSuffix(gotFilePath, tt.fileName) {
t.Errorf("Templater.WriteToFile() = %v, want to end with %v", gotFilePath, tt.fileName)
}
test.AssertFilesEquals(t, gotFilePath, tt.wantFilePath)
})
}
}
func TestTemplaterWriteToFileError(t *testing.T) {
folder := "tmp_folder"
defer os.RemoveAll(folder)
writer, err := filewriter.NewWriter(folder)
if err != nil {
t.Fatalf("failed creating writer error = #{err}")
}
type dataStruct struct {
Key1, Key2, Key3 string
Conditional bool
}
tests := []struct {
testName string
templateFile string
data dataStruct
fileName string
}{
{
testName: "invalid template",
templateFile: "testdata/invalid_template.yaml",
data: dataStruct{
Key1: "value_1",
Key2: "value_2",
Key3: "value_3",
Conditional: true,
},
fileName: "file_tmp.yaml",
},
{
testName: "data doesn't exist",
templateFile: "testdata/key4_template.yaml",
data: dataStruct{
Key1: "value_1",
Key2: "value_2",
Key3: "value_3",
Conditional: false,
},
fileName: "file_tmp.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tr := templater.New(writer)
templateContent := test.ReadFile(t, tt.templateFile)
gotFilePath, err := tr.WriteToFile(templateContent, tt.data, tt.fileName)
if err == nil {
t.Errorf("Templater.WriteToFile() error = nil")
}
if gotFilePath != "" {
t.Errorf("Templater.WriteToFile() = %v, want nil", gotFilePath)
}
})
}
}
| 144 |
eks-anywhere | aws | Go | package templater
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/yaml"
)
const objectSeparator string = "\n---\n"
func AppendYamlResources(resources ...[]byte) []byte {
separator := []byte(objectSeparator)
size := 0
for _, resource := range resources {
size += len(resource) + len(separator)
}
b := make([]byte, 0, size)
for _, resource := range resources {
b = append(b, resource...)
b = append(b, separator...)
}
return b
}
func ObjectsToYaml(objs ...runtime.Object) ([]byte, error) {
r := [][]byte{}
for _, o := range objs {
b, err := yaml.Marshal(o)
if err != nil {
return nil, fmt.Errorf("failed to marshal object: %v", err)
}
r = append(r, b)
}
return AppendYamlResources(r...), nil
}
| 40 |
eks-anywhere | aws | Go | package types
import "context"
type Closer interface {
Close(ctx context.Context) error
}
| 8 |
eks-anywhere | aws | Go | package types
import "github.com/aws/eks-anywhere/release/api/v1alpha1"
type Cluster struct {
Name string
KubeconfigFile string
	ExistingManagement bool // true if the cluster has EKS Anywhere management components
}
type InfrastructureBundle struct {
FolderName string
Manifests []v1alpha1.Manifest
}
| 15 |
eks-anywhere | aws | Go | package types
// EKSACliContextKey is a dedicated context key type that avoids collisions with keys defined in other packages.
type EKSACliContextKey string
// InsecureRegistry can be used to bypass the HTTPS registry certificate verification check when pushing or pulling images or artifacts.
var InsecureRegistry = EKSACliContextKey("insecure-registry")
| 8 |
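A hedged sketch of how a typed context key like InsecureRegistry is typically consumed, assuming the types import path used elsewhere in this repo; the reader side is illustrative and not taken from this codebase.

package main

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/types"
)

func main() {
	// The typed key prevents collisions with string keys from other packages.
	ctx := context.WithValue(context.Background(), types.InsecureRegistry, true)
	if insecure, ok := ctx.Value(types.InsecureRegistry).(bool); ok && insecure {
		fmt.Println("skipping registry TLS verification")
	}
}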
eks-anywhere | aws | Go | package types
type DockerCredentials struct {
Username string
Password string
}
| 7 |
eks-anywhere | aws | Go | package types
type Lookup map[string]struct{}
func (l Lookup) IsPresent(v string) bool {
_, present := l[v]
return present
}
func (l Lookup) ToSlice() []string {
keys := make([]string, 0, len(l))
for k := range l {
keys = append(keys, k)
}
return keys
}
func SliceToLookup(slice []string) Lookup {
l := make(map[string]struct{}, len(slice))
for _, e := range slice {
l[e] = struct{}{}
}
return l
}
| 26 |
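A tiny usage sketch of Lookup, assuming the types import path used by the tests: building the set once with SliceToLookup makes repeated membership checks constant-time.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/types"
)

func main() {
	l := types.SliceToLookup([]string{"etcd", "control-plane"})
	fmt.Println(l.IsPresent("etcd"))   // true
	fmt.Println(l.IsPresent("worker")) // false
}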
eks-anywhere | aws | Go | package types_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/types"
)
func TestLookupIsPresent(t *testing.T) {
tests := []struct {
testName string
value string
slice []string
wantPresent bool
}{
{
testName: "empty slice",
slice: []string{},
value: "v",
wantPresent: false,
},
{
testName: "nil slice",
slice: nil,
value: "v",
wantPresent: false,
},
{
testName: "value present",
slice: []string{"v2", "v1"},
value: "v",
wantPresent: false,
},
{
testName: "value present",
slice: []string{"v2", "v"},
value: "v",
wantPresent: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
l := types.SliceToLookup(tt.slice)
if got := l.IsPresent(tt.value); got != tt.wantPresent {
t.Errorf("Lookup.IsPresent() = %v, want %v", got, tt.wantPresent)
}
})
}
}
func TestLookupToSlice(t *testing.T) {
tests := []struct {
name string
l types.Lookup
want []string
}{
{
name: "empty",
l: types.Lookup{},
want: []string{},
},
{
name: "not empty",
l: types.SliceToLookup([]string{"a", "a", "a", "a"}),
want: []string{"a"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.l.ToSlice()).To(Equal(tt.want))
})
}
}
| 77 |
eks-anywhere | aws | Go | package types
import "time"
type Deployment struct {
Namespace string
Name string
Container string
}
type Machine struct {
Metadata MachineMetadata `json:"metadata"`
Status MachineStatus `json:"status"`
}
func (m *Machine) HasAnyLabel(labels []string) bool {
for _, label := range labels {
if _, ok := m.Metadata.Labels[label]; ok {
return true
}
}
return false
}
type MachineStatus struct {
NodeRef *ResourceRef `json:"nodeRef,omitempty"`
Conditions Conditions
}
type MachineMetadata struct {
Name string `json:"name,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
}
type ResourceRef struct {
APIVersion string `json:"apiVersion"`
Kind string `json:"kind"`
Name string `json:"Name"`
}
type Conditions []Condition
type ConditionType string
type ConditionStatus string
type Condition struct {
Type ConditionType `json:"type"`
Status ConditionStatus `json:"status"`
}
type CAPICluster struct {
Metadata Metadata
Status ClusterStatus
}
type ClusterStatus struct {
Phase string
Conditions Conditions
}
type Metadata struct {
Name string
}
type Datastores struct {
Info Info `json:"Info"`
}
type Info struct {
FreeSpace float64 `json:"FreeSpace"`
}
type NowFunc func() time.Time
type NodeReadyChecker func(status MachineStatus) bool
func WithNodeRef() NodeReadyChecker {
return func(status MachineStatus) bool {
return status.NodeRef != nil
}
}
func WithNodeHealthy() NodeReadyChecker {
return func(status MachineStatus) bool {
for _, c := range status.Conditions {
if c.Type == "NodeHealthy" {
return c.Status == "True"
}
}
return false
}
}
| 94 |
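A short sketch composing the NodeReadyChecker helpers above; the machine status literal is illustrative and the import path is taken from the tests.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/types"
)

func main() {
	status := types.MachineStatus{
		NodeRef:    &types.ResourceRef{Kind: "Node", Name: "node-1"},
		Conditions: types.Conditions{{Type: "NodeHealthy", Status: "True"}},
	}
	// A machine is considered ready only if every checker passes.
	ready := true
	for _, check := range []types.NodeReadyChecker{types.WithNodeRef(), types.WithNodeHealthy()} {
		ready = ready && check(status)
	}
	fmt.Println(ready) // true
}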
eks-anywhere | aws | Go | package types_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/types"
)
func TestHasAnyLabel(t *testing.T) {
tests := []struct {
testName string
labels map[string]string
wantLabels []string
hasAnyLabel bool
}{
{
testName: "empty labels",
labels: map[string]string{},
wantLabels: []string{"label_1"},
hasAnyLabel: false,
},
{
testName: "nil labels and want labels",
labels: nil,
wantLabels: nil,
hasAnyLabel: false,
},
{
testName: "empty want labels",
labels: map[string]string{
"label_1": "val_1",
},
wantLabels: []string{},
hasAnyLabel: false,
},
{
testName: "nil want labels",
labels: map[string]string{
"label_1": "val_1",
},
wantLabels: nil,
hasAnyLabel: false,
},
{
testName: "labels present",
labels: map[string]string{
"label_1": "val_1",
"label_2": "val_2",
},
wantLabels: []string{"label_1"},
hasAnyLabel: true,
},
{
testName: "any label present",
labels: map[string]string{
"label_1": "val_1",
"label_2": "val_2",
},
wantLabels: []string{"label_1", "label_3"},
hasAnyLabel: true,
},
{
testName: "labels not present",
labels: map[string]string{
"label_1": "val_1",
"label_2": "val_2",
},
wantLabels: []string{"label_3"},
hasAnyLabel: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
m := &types.Machine{}
m.Metadata.Labels = tt.labels
if got := m.HasAnyLabel(tt.wantLabels); got != tt.hasAnyLabel {
t.Errorf("machine.HasAnyLabel() = %v, want %v", got, tt.hasAnyLabel)
}
})
}
}
| 82 |
eks-anywhere | aws | Go | package types
type ChangeDiff struct {
ComponentReports []ComponentChangeDiff `json:"components"`
}
type ComponentChangeDiff struct {
ComponentName string `json:"name"`
OldVersion string `json:"oldVersion"`
NewVersion string `json:"newVersion"`
}
func NewChangeDiff(componentReports ...*ComponentChangeDiff) *ChangeDiff {
reports := make([]ComponentChangeDiff, 0, len(componentReports))
for _, r := range componentReports {
if r != nil {
reports = append(reports, *r)
}
}
return &ChangeDiff{
ComponentReports: reports,
}
}
func (c *ChangeDiff) Append(changeDiffs ...*ChangeDiff) {
for _, diff := range changeDiffs {
if diff != nil {
c.ComponentReports = append(c.ComponentReports, diff.ComponentReports...)
}
}
}
func (c *ChangeDiff) Changed() bool {
return len(c.ComponentReports) > 0
}
| 37 |
eks-anywhere | aws | Go | package types_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/types"
)
func TestAppend(t *testing.T) {
tests := []struct {
testName string
changeDiffs *types.ChangeDiff
componentReports []types.ComponentChangeDiff
}{
{
testName: "empty changeDiff",
componentReports: []types.ComponentChangeDiff{
{
ComponentName: "test",
OldVersion: "0.0.1",
NewVersion: "0.0.2",
},
},
changeDiffs: &types.ChangeDiff{},
},
{
testName: "non empty changeDiff",
componentReports: []types.ComponentChangeDiff{
{
ComponentName: "test2",
OldVersion: "0.0.2",
NewVersion: "0.0.3",
},
},
changeDiffs: &types.ChangeDiff{
[]types.ComponentChangeDiff{
{
ComponentName: "test",
OldVersion: "0.0.1",
NewVersion: "0.0.2",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
changeDiffs := &types.ChangeDiff{tt.componentReports}
prevLen := len(tt.changeDiffs.ComponentReports)
tt.changeDiffs.Append(changeDiffs)
if len(tt.changeDiffs.ComponentReports) != (len(tt.componentReports))+prevLen {
t.Errorf("Component Reports were not appended")
}
})
}
}
| 59 |
eks-anywhere | aws | Go | package oci
import (
"fmt"
"path/filepath"
"strings"
)
const OCIPrefix = "oci://"
func Split(artifact string) (path, tag string) {
lastInd := strings.LastIndex(artifact, ":")
if lastInd == -1 {
return artifact, ""
}
if lastInd == len(artifact)-1 {
return artifact[:lastInd], ""
}
return artifact[:lastInd], artifact[lastInd+1:]
}
func ChartURLAndVersion(chart string) (url, version string) {
path, version := Split(chart)
return URL(path), version
}
func ChartPushURL(chart string) string {
path, _ := Split(chart)
path = filepath.Dir(path)
return URL(path)
}
func URL(artifactPath string) string {
return fmt.Sprintf("%s%s", OCIPrefix, artifactPath)
}
| 38 |
eks-anywhere | aws | Go | package oci_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/utils/oci"
)
func TestURL(t *testing.T) {
tests := []struct {
name string
artifactPath string
want string
}{
{
name: "normal artifact",
artifactPath: "public.ecr.aws/folder/folder2/name",
want: "oci://public.ecr.aws/folder/folder2/name",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(oci.URL(tt.artifactPath)).To(Equal(tt.want))
})
}
}
func TestSplit(t *testing.T) {
tests := []struct {
name string
artifact string
wantPath, wantTag string
}{
{
name: "normal chart",
artifact: "public.ecr.aws/folder/folder2/name:v1.0.0",
wantPath: "public.ecr.aws/folder/folder2/name",
wantTag: "v1.0.0",
},
{
name: "no version",
artifact: "public.ecr.aws/folder/folder2/name",
wantPath: "public.ecr.aws/folder/folder2/name",
wantTag: "",
},
{
name: "no version with colon",
artifact: "public.ecr.aws/folder/folder2/name:",
wantPath: "public.ecr.aws/folder/folder2/name",
wantTag: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
gotPath, gotTag := oci.Split(tt.artifact)
g.Expect(gotPath).To(Equal(tt.wantPath))
g.Expect(gotTag).To(Equal(tt.wantTag))
})
}
}
func TestChartURLAndVersion(t *testing.T) {
tests := []struct {
name string
chart string
wantURL, wantVersion string
}{
{
name: "normal chart",
chart: "public.ecr.aws/folder/folder2/name:v1.0.0",
wantURL: "oci://public.ecr.aws/folder/folder2/name",
wantVersion: "v1.0.0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
gotURL, gotVersion := oci.ChartURLAndVersion(tt.chart)
g.Expect(gotURL).To(Equal(tt.wantURL))
g.Expect(gotVersion).To(Equal(tt.wantVersion))
})
}
}
func TestChartPushURL(t *testing.T) {
tests := []struct {
name string
chart string
wantURL string
}{
{
name: "normal chart",
chart: "public.ecr.aws/folder/folder2/name:v1.0.0",
wantURL: "oci://public.ecr.aws/folder/folder2",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(oci.ChartPushURL(tt.chart)).To(Equal(tt.wantURL))
})
}
}
| 108 |
eks-anywhere | aws | Go | /*
Package ptr provides utility functions for converting non-addressable primitive types to pointers.
It's useful in contexts where nil pointers to primitive types carry semantics
(often meaning "not set"), which makes it awkward to set the value from a literal.
Example
type Foo struct {
A *int
}
func main() {
foo := Foo{
A: ptr.Int(1)
}
}
*/
package ptr
func Int(v int) *int {
return &v
}
func Int8(v int8) *int8 {
return &v
}
func Int16(v int16) *int16 {
return &v
}
func Int32(v int32) *int32 {
return &v
}
func Int64(v int64) *int64 {
return &v
}
func Uint(v uint) *uint {
return &v
}
func Uint8(v uint8) *uint8 {
return &v
}
func Uint16(v uint16) *uint16 {
return &v
}
func Uint32(v uint32) *uint32 {
return &v
}
func Uint64(v uint64) *uint64 {
return &v
}
func Float32(v float32) *float32 {
return &v
}
func Float64(v float64) *float64 {
return &v
}
func String(v string) *string {
return &v
}
func Bool(v bool) *bool {
return &v
}
func Byte(v byte) *byte {
return &v
}
func Rune(v rune) *rune {
return &v
}
func Complex64(v complex64) *complex64 {
return &v
}
func Complex128(v complex128) *complex128 {
return &v
}
| 91 |
eks-anywhere | aws | Go | package unstructured
import (
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/cluster-api/util/yaml"
)
func YamlToUnstructured(yamlObjects []byte) ([]unstructured.Unstructured, error) {
// Using this CAPI util for now, not sure if we want to depend on it but it's well written
return yaml.ToUnstructured(yamlObjects)
}
func UnstructuredToYaml(yamlObjects []unstructured.Unstructured) ([]byte, error) {
// Using this CAPI util for now, not sure if we want to depend on it but it's well written
return yaml.FromUnstructured(yamlObjects)
}
// StripNull removes all null fields from the provided yaml.
func StripNull(resources []byte) ([]byte, error) {
uList, err := YamlToUnstructured(resources)
if err != nil {
return nil, fmt.Errorf("converting yaml to unstructured: %v", err)
}
for _, u := range uList {
stripNull(u.Object)
}
return UnstructuredToYaml(uList)
}
func stripNull(m map[string]interface{}) {
val := reflect.ValueOf(m)
for _, key := range val.MapKeys() {
v := val.MapIndex(key)
if v.IsNil() {
delete(m, key.String())
continue
}
if t, ok := v.Interface().(map[string]interface{}); ok {
stripNull(t)
}
}
}
| 46 |
eks-anywhere | aws | Go | package unstructured_test
import (
"bytes"
"testing"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
unstructuredutil "github.com/aws/eks-anywhere/pkg/utils/unstructured"
)
func TestYamlToClientObjects(t *testing.T) {
tests := []struct {
name string
yaml []byte
want map[string]unstructured.Unstructured
}{
{
name: "two objects",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: ns-1
spec:
paused: true
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-2
namespace: ns-1
spec:
controlPlaneEndpoint:
host: 1.1.1.1
port: 8080`),
want: map[string]unstructured.Unstructured{
"cluster-1": {
Object: map[string]interface{}{
"apiVersion": "cluster.x-k8s.io/v1beta1",
"kind": "Cluster",
"metadata": map[string]interface{}{
"name": "cluster-1",
"namespace": "ns-1",
},
"spec": map[string]interface{}{
"paused": true,
},
},
},
"cluster-2": {
Object: map[string]interface{}{
"apiVersion": "cluster.x-k8s.io/v1beta1",
"kind": "Cluster",
"metadata": map[string]interface{}{
"name": "cluster-2",
"namespace": "ns-1",
},
"spec": map[string]interface{}{
"controlPlaneEndpoint": map[string]interface{}{
"host": "1.1.1.1",
"port": float64(8080),
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := unstructuredutil.YamlToUnstructured(tt.yaml)
g.Expect(err).To(BeNil(), "YamlToClientObjects() returned an error")
g.Expect(len(got)).To(Equal(len(tt.want)), "Should have got %d objects", len(tt.want))
for _, obj := range got {
g.Expect(obj).To(Equal(tt.want[obj.GetName()]))
}
})
}
}
func TestClientObjectsToYaml(t *testing.T) {
tests := []struct {
name string
want []byte
objs []unstructured.Unstructured
}{
{
name: "two objects",
want: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: ns-1
spec:
paused: true
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-2
namespace: ns-1
spec:
controlPlaneEndpoint:
host: 1.1.1.1
port: 8080`),
objs: []unstructured.Unstructured{
{
Object: map[string]interface{}{
"apiVersion": "cluster.x-k8s.io/v1beta1",
"kind": "Cluster",
"metadata": map[string]interface{}{
"name": "cluster-1",
"namespace": "ns-1",
},
"spec": map[string]interface{}{
"paused": true,
},
},
},
{
Object: map[string]interface{}{
"apiVersion": "cluster.x-k8s.io/v1beta1",
"kind": "Cluster",
"metadata": map[string]interface{}{
"name": "cluster-2",
"namespace": "ns-1",
},
"spec": map[string]interface{}{
"controlPlaneEndpoint": map[string]interface{}{
"host": "1.1.1.1",
"port": float64(8080),
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := unstructuredutil.UnstructuredToYaml(tt.objs)
g.Expect(err).To(BeNil(), "ClientObjectsToYaml() returned an error")
g.Expect(len(got)).To(Equal(len(tt.want)), "Should have got yaml of length", len(tt.want))
res := bytes.Compare(tt.want, got)
g.Expect(res).To(Equal(0), "ClientObjectsToYaml() produced erroneous yaml")
})
}
}
func TestYamlStripNull(t *testing.T) {
hardwareYaml := []byte(`apiVersion: tinkerbell.org/v1alpha1
kind: Hardware
metadata:
creationTimestamp: ~
labels:
type: cp
name: eksa-dev27
namespace: eksa-system
spec:
bmcRef:
apiGroup:
kind: Machine
name: bmc-eksa-dev27
disks:
- device: /dev/sda
interfaces:
- dhcp:
arch: x86_64
hostname: eksa-dev27
ip:
address: 10.80.8.38
family: 4
gateway: 10.80.8.1
netmask: 255.255.252.0
lease_time: 4294967294
mac: 88:e9:a4:58:5c:ac
name_servers:
- 8.8.8.8
- 8.8.4.4
uefi: true
netboot:
allowPXE: true
allowWorkflow: true
metadata:
facility:
facility_code: onprem
plan_slug: c2.medium.x86
instance:
allow_pxe: true
always_pxe: true
hostname: eksa-dev27
id: 88:e9:a4:58:5c:ac
ips:
- address: 10.80.8.38
family: 4
gateway: 10.80.8.1
netmask: 255.255.252.0
public: true
operating_system: {}
status: {}
---
apiVersion: bmc.tinkerbell.org/v1alpha1
kind: Machine
metadata:
creationTimestamp: null
name: bmc-eksa-dev27
namespace: eksa-system
spec:
connection:
authSecretRef:
name: bmc-eksa-dev27-auth
namespace: eksa-system
host: 10.80.12.46
insecureTLS: true
port: 0
status: {}
---
apiVersion: v1
data:
password: TTZiaUhFcE0=
username: QWRtaW5pc3RyYXRvcg==
kind: Secret
metadata:
creationTimestamp: null
labels:
clusterctl.cluster.x-k8s.io/move: "true"
name: bmc-eksa-dev27-auth
namespace: eksa-system
type: kubernetes.io/basic-auth`)
wantHardwareYaml := []byte(`apiVersion: tinkerbell.org/v1alpha1
kind: Hardware
metadata:
labels:
type: cp
name: eksa-dev27
namespace: eksa-system
spec:
bmcRef:
kind: Machine
name: bmc-eksa-dev27
disks:
- device: /dev/sda
interfaces:
- dhcp:
arch: x86_64
hostname: eksa-dev27
ip:
address: 10.80.8.38
family: 4
gateway: 10.80.8.1
netmask: 255.255.252.0
lease_time: 4294967294
mac: 88:e9:a4:58:5c:ac
name_servers:
- 8.8.8.8
- 8.8.4.4
uefi: true
netboot:
allowPXE: true
allowWorkflow: true
metadata:
facility:
facility_code: onprem
plan_slug: c2.medium.x86
instance:
allow_pxe: true
always_pxe: true
hostname: eksa-dev27
id: 88:e9:a4:58:5c:ac
ips:
- address: 10.80.8.38
family: 4
gateway: 10.80.8.1
netmask: 255.255.252.0
public: true
operating_system: {}
status: {}
---
apiVersion: bmc.tinkerbell.org/v1alpha1
kind: Machine
metadata:
name: bmc-eksa-dev27
namespace: eksa-system
spec:
connection:
authSecretRef:
name: bmc-eksa-dev27-auth
namespace: eksa-system
host: 10.80.12.46
insecureTLS: true
port: 0
status: {}
---
apiVersion: v1
data:
password: TTZiaUhFcE0=
username: QWRtaW5pc3RyYXRvcg==
kind: Secret
metadata:
labels:
clusterctl.cluster.x-k8s.io/move: "true"
name: bmc-eksa-dev27-auth
namespace: eksa-system
type: kubernetes.io/basic-auth`)
g := NewWithT(t)
got, err := unstructuredutil.StripNull(hardwareYaml)
g.Expect(err).To(BeNil())
g.Expect(got).To(Equal(wantHardwareYaml))
}
| 316 |
eks-anywhere | aws | Go | package urls
import (
"net/url"
"strings"
)
// ReplaceHost replaces the host in a URL.
// It supports full URLs and container image URLs.
// If the provided original URL is malformed, there are no guarantees
// that the returned value will be valid.
// If host is empty, it will return the original URL.
func ReplaceHost(orgURL, host string) string {
if host == "" {
return orgURL
}
u, _ := url.Parse(orgURL)
if u.Scheme == "" {
u, _ = url.Parse("oci://" + orgURL)
u.Scheme = ""
}
u.Host = host
return strings.TrimPrefix(u.String(), "//")
}
| 26 |
eks-anywhere | aws | Go | package urls_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/utils/urls"
)
func TestReplaceHost(t *testing.T) {
tests := []struct {
name string
orgURL string
host string
want string
}{
{
name: "oci url",
orgURL: "oci://public.ecr.aws/product/chart",
host: "1.2.3.4:443",
want: "oci://1.2.3.4:443/product/chart",
},
{
name: "https url",
orgURL: "https://public.ecr.aws/product/site",
host: "1.2.3.4:443",
want: "https://1.2.3.4:443/product/site",
},
{
name: "container image",
orgURL: "public.ecr.aws/product/image:tag",
host: "1.2.3.4:443",
want: "1.2.3.4:443/product/image:tag",
},
{
name: "empty host",
orgURL: "public.ecr.aws/product/image:tag",
want: "public.ecr.aws/product/image:tag",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(urls.ReplaceHost(tt.orgURL, tt.host)).To(Equal(tt.want))
})
}
}
| 49 |
eks-anywhere | aws | Go | package yaml
import (
"bytes"
"fmt"
"sigs.k8s.io/yaml"
)
// Join joins YAML resources into a single YAML document. It does not validate individual
// resources.
func Join(resources [][]byte) []byte {
return bytes.Join(resources, []byte("\n---\n"))
}
// Serialize serializes objects into YAML documents.
func Serialize[T any](objs ...T) ([][]byte, error) {
r := [][]byte{}
for _, o := range objs {
b, err := yaml.Marshal(o)
if err != nil {
return nil, fmt.Errorf("marshalling object: %v", err)
}
r = append(r, b)
}
return r, nil
}
| 28 |
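A minimal usage sketch of Serialize and Join; the pkg/utils/yaml import path is assumed from this package's location in the repo. Serialize marshals each object (sigs.k8s.io/yaml honors JSON tags) and Join splices the resulting documents with a --- separator.

package main

import (
	"fmt"

	eksayaml "github.com/aws/eks-anywhere/pkg/utils/yaml"
)

type widget struct {
	Name string `json:"name"`
}

func main() {
	docs, err := eksayaml.Serialize(widget{Name: "a"}, widget{Name: "b"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(eksayaml.Join(docs)))
}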
eks-anywhere | aws | Go | /*
Package validation implements tools to validate data objects.
These might be used from the CLI and/or the controller.
This package should not, under any circumstance, include specific validation logic.
Only the tools to operate that logic should live here.
*/
package validation
| 10 |
eks-anywhere | aws | Go | package validation
import "errors"
// Remediable is an error that provides a possible remediation.
type Remediable interface {
Remediation() string
}
// remediableError implements Remediable around a generic error.
type remediableError struct {
error
remediation string
}
// Remediation returns a possible solution to the error.
func (e *remediableError) Remediation() string {
return e.remediation
}
// NewRemediableErr returns a new [Remediable] error.
func NewRemediableErr(err, remediation string) error {
return &remediableError{
error: errors.New(err),
remediation: remediation,
}
}
// WithRemediation makes an error [Remediable].
func WithRemediation(err error, remediation string) error {
return &remediableError{
error: err,
remediation: remediation,
}
}
// IsRemediable checks if an error has a remediation.
func IsRemediable(err error) bool {
_, ok := err.(Remediable)
return ok
}
// Remediation returns the Remediation message for an error if it has it.
// Otherwise it returns an empty string.
func Remediation(err error) string {
fixable, ok := err.(Remediable)
if !ok {
return ""
}
return fixable.Remediation()
}
| 53 |
eks-anywhere | aws | Go | package validation_test
import (
"errors"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/validation"
)
func TestErrorRemediationWithRemediation(t *testing.T) {
g := NewWithT(t)
err := errors.New("my error")
remediable := validation.WithRemediation(err, "this is how you fix it")
g.Expect(validation.IsRemediable(remediable)).To(BeTrue())
g.Expect(validation.Remediation(remediable)).To(Equal("this is how you fix it"))
}
func TestIsRemediable(t *testing.T) {
tests := []struct {
name string
err error
want bool
}{
{
name: "remediable",
err: validation.NewRemediableErr("one error", "just fix it"),
want: true,
},
{
name: "non remediable",
err: errors.New("non fixable"),
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(validation.IsRemediable(tt.err)).To(Equal(tt.want))
})
}
}
func TestRemediation(t *testing.T) {
tests := []struct {
name string
err error
want string
}{
{
name: "remediable",
err: validation.NewRemediableErr("one error", "just fix it"),
want: "just fix it",
},
{
name: "non remediable",
err: errors.New("non fixable"),
want: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(validation.Remediation(tt.err)).To(Equal(tt.want))
})
}
}
| 70 |
eks-anywhere | aws | Go | package validation_test
import (
"context"
"errors"
"fmt"
"github.com/go-logr/logr"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cli"
"github.com/aws/eks-anywhere/pkg/cluster"
eksaerrors "github.com/aws/eks-anywhere/pkg/errors"
"github.com/aws/eks-anywhere/pkg/validation"
)
func ExampleRunner_RunAll_validations() {
log := test.NewNullLogger()
r := validation.NewRunner[*cluster.Spec](validation.WithMaxJobs(10))
cpValidator := newControlPlaneValidator(log)
r.Register(
func(ctx context.Context, spec *cluster.Spec) error {
if spec.Cluster.Spec.KubernetesVersion == "" {
return errors.New("kubernetesVersion can't be empty")
}
return nil
},
validation.Sequentially(
func(ctx context.Context, spec *cluster.Spec) error {
if spec.Cluster.Name == "" {
return validation.WithRemediation(
errors.New("cluster name is empty"),
"set a name for your cluster",
)
}
return nil
},
cpValidator.validateCount,
),
)
ctx := context.Background()
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = ""
s.Cluster.Spec.KubernetesVersion = anywherev1.Kube124
s.Cluster.Spec.ControlPlaneConfiguration.Count = 0
})
agg := r.RunAll(ctx, spec)
if agg != nil {
printErrors(agg)
return
}
fmt.Println("Cluster config is valid")
// Output:
// Invalid cluster config
// - cluster name is empty. Try to set a name for your cluster
// - control plane node count can't be 0
}
func printErrors(agg eksaerrors.Aggregate) {
fmt.Println("Invalid cluster config")
for _, err := range agg.Errors() {
msg := "- " + err.Error()
if validation.IsRemediable(err) {
msg += ". Try to " + validation.Remediation(err)
}
fmt.Println(msg)
}
}
type controlPlaneValidator struct {
log logr.Logger
}
func newControlPlaneValidator(log logr.Logger) *controlPlaneValidator {
return &controlPlaneValidator{log: log}
}
func (v *controlPlaneValidator) validateCount(ctx context.Context, spec *cluster.Spec) error {
if spec.Cluster.Spec.ControlPlaneConfiguration.Count == 0 {
cli.ValidationFailed(v.log, "Control plane invalid")
return errors.New("control plane node count can't be 0")
}
cli.ValidationPassed(v.log, "Control plane valid")
return nil
}
| 94 |
eks-anywhere | aws | Go | package validation
import (
"context"
"reflect"
"runtime"
"sync"
"github.com/aws/eks-anywhere/pkg/errors"
)
// Validatable is anything that can be validated.
type Validatable[O any] interface {
DeepCopy() O
}
// Validation is the logic for a validation of a type O.
type Validation[O Validatable[O]] func(ctx context.Context, obj O) error
// Runner allows to compose and run validations.
type Runner[O Validatable[O]] struct {
validations []Validation[O]
config *RunnerConfig
}
// RunnerConfig contains the configuration for a Runner.
type RunnerConfig struct {
maxJobs int
}
// RunnerOpt allows to configure a Runner with optional parameters.
type RunnerOpt func(*RunnerConfig)
// WithMaxJobs sets the maximum number of concurrent routines the runner will use.
func WithMaxJobs(m int) RunnerOpt {
return func(c *RunnerConfig) {
c.maxJobs = m
}
}
// NewRunner constructs a new Runner.
func NewRunner[O Validatable[O]](opts ...RunnerOpt) *Runner[O] {
r := &Runner[O]{
config: &RunnerConfig{
maxJobs: runtime.GOMAXPROCS(0),
},
}
for _, opt := range opts {
opt(r.config)
}
return r
}
// Register adds validations to the Runner.
func (r *Runner[O]) Register(validations ...Validation[O]) {
r.validations = append(r.validations, validations...)
}
// RunAll runs all validations concurrently and waits until they all finish,
// aggregating the errors if present. obj must not be modified. If it is, this
// indicates a programming error and the method will panic.
func (r *Runner[O]) RunAll(ctx context.Context, obj O) errors.Aggregate {
copyObj := obj.DeepCopy()
var allErr []error
for err := range r.run(ctx, obj) {
allErr = append(allErr, err)
}
if !reflect.DeepEqual(obj, copyObj) {
panic("validations must not modify the object under validation")
}
return errors.NewAggregate(allErr)
}
func (r *Runner[O]) run(ctx context.Context, obj O) <-chan error {
results := make(chan error)
validations := make(chan Validation[O])
var wg sync.WaitGroup
numWorkers := r.config.maxJobs
if numWorkers > len(r.validations) {
numWorkers = len(r.validations)
}
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
for validate := range validations {
if err := validate(ctx, obj); err != nil {
for _, err := range flatten(err) {
results <- err
}
}
}
wg.Done()
}()
}
go func() {
for _, v := range r.validations {
validations <- v
}
close(validations)
}()
go func() {
wg.Wait()
close(results)
}()
return results
}
// Sequentially composes a set of validations into one which will run them sequentially and in order.
func Sequentially[O Validatable[O]](validations ...Validation[O]) Validation[O] {
return func(ctx context.Context, obj O) error {
var allErr []error
for _, h := range validations {
if err := h(ctx, obj); err != nil {
allErr = append(allErr, flatten(err)...)
}
}
return errors.NewAggregate(allErr)
}
}
// flatten unfolds and flattens errors inside an errors.Aggregate. If err is not
// an errors.Aggregate, it just returns a slice with a single error.
func flatten(err error) []error {
if agg, ok := err.(errors.Aggregate); ok {
return errors.Flatten(agg).Errors()
}
return []error{err}
}
| 139 |
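A minimal usage sketch of the generic runner above, using the import path the tests below rely on. The `machineSpec` type and its field are hypothetical, introduced only to satisfy the `Validatable` constraint:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/validation"
)

// machineSpec is a hypothetical object under validation.
type machineSpec struct {
	cpus int
}

// DeepCopy satisfies validation.Validatable[*machineSpec].
func (m *machineSpec) DeepCopy() *machineSpec {
	c := *m
	return &c
}

func main() {
	r := validation.NewRunner[*machineSpec](validation.WithMaxJobs(2))
	r.Register(
		func(ctx context.Context, m *machineSpec) error {
			if m.cpus < 2 {
				return errors.New("at least 2 CPUs are required")
			}
			return nil
		},
	)

	// RunAll executes every registered validation concurrently and
	// returns an aggregate of all errors (nil when everything passes).
	if agg := r.RunAll(context.Background(), &machineSpec{cpus: 1}); agg != nil {
		fmt.Println(agg.Error())
	}
}
```

Note the contract enforced by RunAll: validations receive the original object, and the runner panics if any of them mutates it.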
eks-anywhere | aws | Go | package validation_test
import (
"context"
"errors"
"testing"
. "github.com/onsi/gomega"
eksaerrors "github.com/aws/eks-anywhere/pkg/errors"
"github.com/aws/eks-anywhere/pkg/validation"
)
func TestRunnerRunAllSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
r := validation.NewRunner[*apiCluster]()
r.Register(
func(ctx context.Context, cluster *apiCluster) error {
if cluster.controlPlaneCount == 0 {
return errors.New("controlPlaneCount can't be 0")
}
return nil
},
func(ctx context.Context, cluster *apiCluster) error {
if cluster.bundlesName == "" {
return errors.New("bundlesName can't be empty")
}
return nil
},
)
cluster := &apiCluster{
controlPlaneCount: 3,
bundlesName: "bundles-1",
}
g.Expect(r.RunAll(ctx, cluster)).To(Succeed())
}
func TestRunnerRunAllSequentially(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
r := validation.NewRunner[*apiCluster](validation.WithMaxJobs(10))
callCounter := 0
r.Register(
func(ctx context.Context, cluster *apiCluster) error {
if cluster.bundlesName == "" {
return errors.New("bundlesName can't be empty")
}
return nil
},
validation.Sequentially(
func(ctx context.Context, _ *apiCluster) error {
g.Expect(callCounter).To(Equal(0))
callCounter++
return errors.New("invalid 1")
},
func(ctx context.Context, _ *apiCluster) error {
g.Expect(callCounter).To(Equal(1))
callCounter++
return errors.New("invalid 2")
},
),
)
cluster := &apiCluster{
controlPlaneCount: 0,
bundlesName: "bundles-1",
}
err := r.RunAll(ctx, cluster)
g.Expect(err).To(HaveOccurred())
g.Expect(err).To(MatchError(ContainSubstring("invalid 1")))
g.Expect(err).To(MatchError(ContainSubstring("invalid 2")))
g.Expect(callCounter).To(Equal(2))
}
func TestRunnerRunAllAggregatedError(t *testing.T) {
e1 := errors.New("first error")
e2 := errors.New("second error")
e3 := errors.New("third error")
g := NewWithT(t)
ctx := context.Background()
r := validation.NewRunner[*apiCluster]()
r.Register(
func(ctx context.Context, _ *apiCluster) error {
return eksaerrors.NewAggregate([]error{e1, e2})
},
func(ctx context.Context, _ *apiCluster) error {
return e3
},
)
cluster := &apiCluster{}
err := r.RunAll(ctx, cluster)
g.Expect(err).To(HaveOccurred())
g.Expect(err.Errors()).To(ConsistOf(e1, e2, e3))
}
func TestRunnerRunAllPanicAfterModifyingObject(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
r := validation.NewRunner[*apiCluster]()
r.Register(
func(ctx context.Context, _ *apiCluster) error {
return nil
},
func(ctx context.Context, cluster *apiCluster) error {
cluster.controlPlaneCount = 5
return nil
},
)
cluster := &apiCluster{}
run := func() {
_ = r.RunAll(ctx, cluster)
}
g.Expect(run).To(PanicWith("validations must not modify the object under validation"))
}
type apiCluster struct {
controlPlaneCount int
bundlesName string
}
func (a *apiCluster) DeepCopy() *apiCluster {
copy := *a
return &copy
}
| 137 |
eks-anywhere | aws | Go | package validations
import (
"context"
"errors"
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
)
// ValidateOSForRegistryMirror checks if the OS is valid for the provided registry mirror configuration.
func ValidateOSForRegistryMirror(clusterSpec *cluster.Spec, provider providers.Provider) error {
cluster := clusterSpec.Cluster
if cluster.Spec.RegistryMirrorConfiguration == nil {
return nil
}
machineConfigs := provider.MachineConfigs(clusterSpec)
if !cluster.Spec.RegistryMirrorConfiguration.InsecureSkipVerify || machineConfigs == nil {
return nil
}
for _, mc := range machineConfigs {
if mc.OSFamily() == v1alpha1.Bottlerocket {
return errors.New("InsecureSkipVerify is not supported for bottlerocket")
}
}
return nil
}
func ValidateCertForRegistryMirror(clusterSpec *cluster.Spec, tlsValidator TlsValidator) error {
cluster := clusterSpec.Cluster
if cluster.Spec.RegistryMirrorConfiguration == nil {
return nil
}
if cluster.Spec.RegistryMirrorConfiguration.InsecureSkipVerify {
logger.V(1).Info("Warning: skip registry certificate verification is enabled", "registryMirrorConfiguration.insecureSkipVerify", true)
return nil
}
host, port := cluster.Spec.RegistryMirrorConfiguration.Endpoint, cluster.Spec.RegistryMirrorConfiguration.Port
authorityUnknown, err := tlsValidator.IsSignedByUnknownAuthority(host, port)
if err != nil {
return fmt.Errorf("validating registry mirror endpoint: %v", err)
}
if authorityUnknown {
logger.V(1).Info(fmt.Sprintf("Warning: registry mirror endpoint %s is using self-signed certs", cluster.Spec.RegistryMirrorConfiguration.Endpoint))
}
certContent := cluster.Spec.RegistryMirrorConfiguration.CACertContent
if certContent == "" && authorityUnknown {
return fmt.Errorf("registry %s is using self-signed certs, please provide the certificate using caCertContent field. Or use insecureSkipVerify field to skip registry certificate verification", cluster.Spec.RegistryMirrorConfiguration.Endpoint)
}
if certContent != "" {
if err = tlsValidator.ValidateCert(host, port, certContent); err != nil {
return fmt.Errorf("invalid registry certificate: %v", err)
}
}
return nil
}
// ValidateAuthenticationForRegistryMirror checks if REGISTRY_USERNAME and REGISTRY_PASSWORD is set if authenticated registry mirrors are used.
func ValidateAuthenticationForRegistryMirror(clusterSpec *cluster.Spec) error {
cluster := clusterSpec.Cluster
if cluster.Spec.RegistryMirrorConfiguration != nil && cluster.Spec.RegistryMirrorConfiguration.Authenticate {
_, _, err := config.ReadCredentials()
if err != nil {
return err
}
}
return nil
}
// ValidateManagementClusterName checks if the management cluster specified in the workload cluster spec is valid.
func ValidateManagementClusterName(ctx context.Context, k KubectlClient, mgmtCluster *types.Cluster, mgmtClusterName string) error {
cluster, err := k.GetEksaCluster(ctx, mgmtCluster, mgmtClusterName)
if err != nil {
return err
}
if cluster.IsManaged() {
return fmt.Errorf("%s is not a valid management cluster", mgmtClusterName)
}
return nil
}
// ValidateManagementClusterBundlesVersion checks if management cluster's bundle version
// is greater than or equal to the bundle version used to upgrade a workload cluster.
func ValidateManagementClusterBundlesVersion(ctx context.Context, k KubectlClient, mgmtCluster *types.Cluster, workload *cluster.Spec) error {
cluster, err := k.GetEksaCluster(ctx, mgmtCluster, mgmtCluster.Name)
if err != nil {
return err
}
if cluster.Spec.BundlesRef == nil {
return fmt.Errorf("management cluster bundlesRef cannot be nil")
}
mgmtBundles, err := k.GetBundles(ctx, mgmtCluster.KubeconfigFile, cluster.Spec.BundlesRef.Name, cluster.Spec.BundlesRef.Namespace)
if err != nil {
return err
}
if mgmtBundles.Spec.Number < workload.Bundles.Spec.Number {
return fmt.Errorf("cannot upgrade workload cluster with bundle spec.number %d while management cluster %s is on older bundle spec.number %d", workload.Bundles.Spec.Number, mgmtCluster.Name, mgmtBundles.Spec.Number)
}
return nil
}
| 117 |
eks-anywhere | aws | Go | package validations_test
import (
"context"
"errors"
"os"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers"
providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type clusterTest struct {
*WithT
tlsValidator *mocks.MockTlsValidator
kubectl *mocks.MockKubectlClient
provider *providermocks.MockProvider
clusterSpec *cluster.Spec
certContent string
host, port string
}
type clusterTestOpt func(t *testing.T, ct *clusterTest)
func newTest(t *testing.T, opts ...clusterTestOpt) *clusterTest {
ctrl := gomock.NewController(t)
cTest := &clusterTest{
WithT: NewWithT(t),
clusterSpec: test.NewClusterSpec(),
provider: providermocks.NewMockProvider(ctrl),
}
for _, opt := range opts {
opt(t, cTest)
}
return cTest
}
func withTLS() clusterTestOpt {
return func(t *testing.T, ct *clusterTest) {
ctrl := gomock.NewController(t)
host := "https://host.h"
port := "1111"
ct.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{
Endpoint: host,
Port: port,
}
ct.tlsValidator = mocks.NewMockTlsValidator(ctrl)
ct.certContent = "content"
ct.host = host
ct.port = port
}
}
func withKubectl() clusterTestOpt {
return func(t *testing.T, ct *clusterTest) {
ctrl := gomock.NewController(t)
ct.kubectl = mocks.NewMockKubectlClient(ctrl)
}
}
func TestValidateCertForRegistryMirrorNoRegistryMirror(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = nil
tt.Expect(validations.ValidateCertForRegistryMirror(tt.clusterSpec, tt.tlsValidator)).To(Succeed())
}
func TestValidateCertForRegistryMirrorCertInvalid(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.CACertContent = tt.certContent
tt.tlsValidator.EXPECT().IsSignedByUnknownAuthority(tt.host, tt.port).Return(false, nil)
tt.tlsValidator.EXPECT().ValidateCert(tt.host, tt.port, tt.certContent).Return(errors.New("invalid cert"))
tt.Expect(validations.ValidateCertForRegistryMirror(tt.clusterSpec, tt.tlsValidator)).To(
MatchError(ContainSubstring("invalid registry certificate: invalid cert")),
)
}
func TestValidateCertForRegistryMirrorCertValid(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.CACertContent = tt.certContent
tt.tlsValidator.EXPECT().IsSignedByUnknownAuthority(tt.host, tt.port).Return(false, nil)
tt.tlsValidator.EXPECT().ValidateCert(tt.host, tt.port, tt.certContent).Return(nil)
tt.Expect(validations.ValidateCertForRegistryMirror(tt.clusterSpec, tt.tlsValidator)).To(Succeed())
}
func TestValidateCertForRegistryMirrorNoCertIsSignedByKnownAuthority(t *testing.T) {
tt := newTest(t, withTLS())
tt.tlsValidator.EXPECT().IsSignedByUnknownAuthority(tt.host, tt.port).Return(false, nil)
tt.Expect(validations.ValidateCertForRegistryMirror(tt.clusterSpec, tt.tlsValidator)).To(Succeed())
}
func TestValidateCertForRegistryMirrorIsSignedByUnknownAuthority(t *testing.T) {
tt := newTest(t, withTLS())
tt.tlsValidator.EXPECT().IsSignedByUnknownAuthority(tt.host, tt.port).Return(true, nil)
tt.Expect(validations.ValidateCertForRegistryMirror(tt.clusterSpec, tt.tlsValidator)).To(
MatchError(ContainSubstring("registry https://host.h is using self-signed certs, please provide the certificate using caCertContent field")),
)
}
func TestValidateCertForRegistryMirrorInsecureSkip(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.InsecureSkipVerify = true
tt.Expect(validations.ValidateCertForRegistryMirror(tt.clusterSpec, tt.tlsValidator)).To(Succeed())
}
func TestValidateAuthenticationForRegistryMirrorNoRegistryMirror(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = nil
tt.Expect(validations.ValidateAuthenticationForRegistryMirror(tt.clusterSpec)).To(Succeed())
}
func TestValidateAuthenticationForRegistryMirrorNoAuth(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Authenticate = false
tt.Expect(validations.ValidateAuthenticationForRegistryMirror(tt.clusterSpec)).To(Succeed())
}
func TestValidateAuthenticationForRegistryMirrorAuthInvalid(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Authenticate = true
if err := os.Unsetenv("REGISTRY_USERNAME"); err != nil {
t.Fatal(err)
}
if err := os.Unsetenv("REGISTRY_PASSWORD"); err != nil {
t.Fatal(err)
}
tt.Expect(validations.ValidateAuthenticationForRegistryMirror(tt.clusterSpec)).To(
MatchError(ContainSubstring("please set REGISTRY_USERNAME env var")))
}
func TestValidateAuthenticationForRegistryMirrorAuthValid(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Authenticate = true
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
tt.Expect(validations.ValidateAuthenticationForRegistryMirror(tt.clusterSpec)).To(Succeed())
}
func TestValidateOSForRegistryMirrorNoRegistryMirror(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = nil
tt.Expect(validations.ValidateOSForRegistryMirror(tt.clusterSpec, tt.provider)).To(Succeed())
}
func TestValidateOSForRegistryMirrorInsecureSkipVerifyDisabled(t *testing.T) {
tt := newTest(t, withTLS())
tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.InsecureSkipVerify = false
tt.provider.EXPECT().MachineConfigs(tt.clusterSpec).Return([]providers.MachineConfig{})
tt.Expect(validations.ValidateOSForRegistryMirror(tt.clusterSpec, tt.provider)).To(Succeed())
}
func TestValidateOSForRegistryMirrorInsecureSkipVerifyEnabled(t *testing.T) {
tests := []struct {
name string
mirrorConfig *anywherev1.RegistryMirrorConfiguration
machineConfigs func() []providers.MachineConfig
wantErr string
}{
{
name: "insecureSkipVerify no machine configs",
machineConfigs: func() []providers.MachineConfig {
return nil
},
wantErr: "",
},
{
name: "insecureSkipVerify on provider with ubuntu",
machineConfigs: func() []providers.MachineConfig {
configs := make([]providers.MachineConfig, 0, 1)
configs = append(configs, &anywherev1.VSphereMachineConfig{
Spec: anywherev1.VSphereMachineConfigSpec{
OSFamily: anywherev1.Ubuntu,
},
})
return configs
},
wantErr: "",
},
{
name: "insecureSkipVerify on provider with bottlerocket",
machineConfigs: func() []providers.MachineConfig {
configs := make([]providers.MachineConfig, 0, 1)
configs = append(configs, &anywherev1.SnowMachineConfig{
Spec: anywherev1.SnowMachineConfigSpec{
OSFamily: anywherev1.Bottlerocket,
},
})
return configs
},
wantErr: "InsecureSkipVerify is not supported for bottlerocket",
},
{
name: "insecureSkipVerify on provider with redhat",
machineConfigs: func() []providers.MachineConfig {
configs := make([]providers.MachineConfig, 0, 1)
configs = append(configs, &anywherev1.VSphereMachineConfig{
Spec: anywherev1.VSphereMachineConfigSpec{
OSFamily: anywherev1.RedHat,
},
})
return configs
},
wantErr: "",
},
}
validationTest := newTest(t, func(t *testing.T, ct *clusterTest) {
ct.clusterSpec = test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{
InsecureSkipVerify: true,
}
})
})
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
validationTest.provider.EXPECT().MachineConfigs(validationTest.clusterSpec).Return(test.machineConfigs())
err := validations.ValidateOSForRegistryMirror(validationTest.clusterSpec, validationTest.provider)
if test.wantErr != "" {
validationTest.Expect(err).To(MatchError(test.wantErr))
} else {
validationTest.Expect(err).To(BeNil())
}
})
}
}
func TestValidateManagementClusterNameValid(t *testing.T) {
mgmtName := "test"
tt := newTest(t, withKubectl())
tt.clusterSpec.Cluster.Spec.ManagementCluster.Name = mgmtName
ctx := context.Background()
tt.kubectl.EXPECT().GetEksaCluster(ctx, managementCluster(mgmtName), mgmtName).Return(anywhereCluster(mgmtName), nil)
tt.Expect(validations.ValidateManagementClusterName(ctx, tt.kubectl, managementCluster(mgmtName), mgmtName)).To(Succeed())
}
func TestValidateManagementClusterNameEmptyValid(t *testing.T) {
mgmtName := "test"
tt := newTest(t, withKubectl())
tt.clusterSpec.Cluster.Spec.ManagementCluster.Name = mgmtName
ctx := context.Background()
mgmtCluster := anywhereCluster(mgmtName)
mgmtCluster.Spec.ManagementCluster.Name = ""
tt.kubectl.EXPECT().GetEksaCluster(ctx, managementCluster(mgmtName), mgmtName).Return(anywhereCluster(mgmtName), nil)
tt.Expect(validations.ValidateManagementClusterName(ctx, tt.kubectl, managementCluster(mgmtName), mgmtName)).To(Succeed())
}
func TestValidateManagementClusterNameNotExist(t *testing.T) {
mgmtName := "test"
tt := newTest(t, withKubectl())
tt.clusterSpec.Cluster.Spec.ManagementCluster.Name = mgmtName
ctx := context.Background()
tt.kubectl.EXPECT().GetEksaCluster(ctx, managementCluster(mgmtName), mgmtName).Return(nil, errors.New("test"))
tt.Expect(validations.ValidateManagementClusterName(ctx, tt.kubectl, managementCluster(mgmtName), mgmtName)).NotTo(Succeed())
}
func TestValidateManagementClusterNameWorkload(t *testing.T) {
mgmtName := "test"
tt := newTest(t, withKubectl())
tt.clusterSpec.Cluster.Spec.ManagementCluster.Name = mgmtName
ctx := context.Background()
eksaCluster := anywhereCluster(mgmtName)
eksaCluster.Spec.ManagementCluster.Name = "mgmtCluster"
tt.kubectl.EXPECT().GetEksaCluster(ctx, managementCluster(mgmtName), mgmtName).Return(eksaCluster, nil)
tt.Expect(validations.ValidateManagementClusterName(ctx, tt.kubectl, managementCluster(mgmtName), mgmtName)).NotTo(Succeed())
}
func managementCluster(name string) *types.Cluster {
return &types.Cluster{
Name: name,
}
}
func anywhereCluster(name string) *anywherev1.Cluster {
return &anywherev1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: anywherev1.ClusterSpec{
ManagementCluster: anywherev1.ManagementCluster{
Name: name,
},
},
}
}
func TestValidateManagementClusterBundlesVersion(t *testing.T) {
type testParam struct {
mgmtBundlesName string
mgmtBundlesNumber int
wkBundlesName string
wkBundlesNumber int
wantErr string
errGetEksaCluster error
errGetBundles error
}
testParams := []testParam{
{
mgmtBundlesName: "bundles-28",
mgmtBundlesNumber: 28,
wkBundlesName: "bundles-27",
wkBundlesNumber: 27,
wantErr: "",
},
{
mgmtBundlesName: "bundles-28",
mgmtBundlesNumber: 28,
wkBundlesName: "bundles-29",
wkBundlesNumber: 29,
wantErr: "cannot upgrade workload cluster with bundle spec.number 29 while management cluster management-cluster is on older bundle spec.number 28",
},
{
mgmtBundlesName: "bundles-28",
mgmtBundlesNumber: 28,
wkBundlesName: "bundles-27",
wkBundlesNumber: 27,
wantErr: "failed to reach cluster",
errGetEksaCluster: errors.New("failed to reach cluster"),
},
{
mgmtBundlesName: "bundles-28",
mgmtBundlesNumber: 28,
wkBundlesName: "bundles-27",
wkBundlesNumber: 27,
wantErr: "failed to reach cluster",
errGetBundles: errors.New("failed to reach cluster"),
},
}
for _, p := range testParams {
tt := newTest(t, withKubectl())
mgmtName := "management-cluster"
mgmtCluster := managementCluster(mgmtName)
mgmtClusterObject := anywhereCluster(mgmtName)
mgmtClusterObject.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: p.mgmtBundlesName,
Namespace: constants.EksaSystemNamespace,
}
tt.clusterSpec.Config.Cluster.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: p.wkBundlesName,
Namespace: constants.EksaSystemNamespace,
}
wkBundle := &releasev1alpha1.Bundles{
Spec: releasev1alpha1.BundlesSpec{
Number: p.wkBundlesNumber,
},
}
tt.clusterSpec.Bundles = wkBundle
mgmtBundle := &releasev1alpha1.Bundles{
Spec: releasev1alpha1.BundlesSpec{
Number: p.mgmtBundlesNumber,
},
}
ctx := context.Background()
tt.kubectl.EXPECT().GetEksaCluster(ctx, mgmtCluster, mgmtCluster.Name).Return(mgmtClusterObject, p.errGetEksaCluster)
if p.errGetEksaCluster == nil {
tt.kubectl.EXPECT().GetBundles(ctx, mgmtCluster.KubeconfigFile, mgmtClusterObject.Spec.BundlesRef.Name, mgmtClusterObject.Spec.BundlesRef.Namespace).Return(mgmtBundle, p.errGetBundles)
}
if p.wantErr == "" {
err := validations.ValidateManagementClusterBundlesVersion(ctx, tt.kubectl, mgmtCluster, tt.clusterSpec)
tt.Expect(err).To(BeNil())
} else {
err := validations.ValidateManagementClusterBundlesVersion(ctx, tt.kubectl, mgmtCluster, tt.clusterSpec)
tt.Expect(err.Error()).To(Equal(p.wantErr))
}
}
}
func TestValidateManagementClusterBundlesVersionMissingBundlesRef(t *testing.T) {
tt := newTest(t, withKubectl())
wantErr := "management cluster bundlesRef cannot be nil"
mgmtName := "management-cluster"
mgmtCluster := managementCluster(mgmtName)
mgmtClusterObject := anywhereCluster(mgmtName)
mgmtClusterObject.Spec.BundlesRef = nil
ctx := context.Background()
tt.kubectl.EXPECT().GetEksaCluster(ctx, mgmtCluster, mgmtCluster.Name).Return(mgmtClusterObject, nil)
err := validations.ValidateManagementClusterBundlesVersion(ctx, tt.kubectl, mgmtCluster, tt.clusterSpec)
tt.Expect(err.Error()).To(Equal(wantErr))
}
| 417 |
eks-anywhere | aws | Go | package validations
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
recommendedTotalMemory = 6200000000 // ~6.2 GB recommended for the Docker VM
requiredMajorVersion   = 20         // minimum supported Docker major version
)
type DockerExecutable interface {
Version(ctx context.Context) (int, error)
AllocatedMemory(ctx context.Context) (uint64, error)
}
func CheckMinimumDockerVersion(ctx context.Context, dockerExecutable DockerExecutable) error {
installedMajorVersionInt, err := dockerExecutable.Version(ctx)
if err != nil {
return err
}
if installedMajorVersionInt < requiredMajorVersion {
return fmt.Errorf("minimum requirements for docker version have not been met. Install Docker version %d.x.x or above", requiredMajorVersion)
}
return nil
}
func CheckDockerAllocatedMemory(ctx context.Context, dockerExecutable DockerExecutable) {
totalMemoryAllocated, err := dockerExecutable.AllocatedMemory(ctx)
if err != nil {
logger.Error(err, "Failed to validate docker memory: error while reading memory allocated to Docker")
return
}
if totalMemoryAllocated < recommendedTotalMemory {
logger.V(3).Info("Warning: recommended memory to be allocated for Docker is 6 GB, please be aware that not allocating enough memory can cause problems while cluster creation")
}
}
func ValidateDockerExecutable(ctx context.Context, docker DockerExecutable, os string) error {
err := CheckMinimumDockerVersion(ctx, docker)
if err != nil {
return fmt.Errorf("failed to validate docker: %v", err)
}
CheckDockerAllocatedMemory(ctx, docker)
return nil
}
| 52 |
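A short sketch of exercising these checks outside the gomock-based tests below; `fakeDocker` is a hypothetical stub that satisfies the `DockerExecutable` interface:

```go
package main

import (
	"context"
	"fmt"
	"runtime"

	"github.com/aws/eks-anywhere/pkg/validations"
)

// fakeDocker is a hypothetical DockerExecutable used only for this sketch.
type fakeDocker struct {
	major  int
	memory uint64
}

func (f fakeDocker) Version(ctx context.Context) (int, error)            { return f.major, nil }
func (f fakeDocker) AllocatedMemory(ctx context.Context) (uint64, error) { return f.memory, nil }

func main() {
	ctx := context.Background()
	docker := fakeDocker{major: 19, memory: 6_200_000_000}

	// Fails: major version 19 is below the required 20.
	if err := validations.ValidateDockerExecutable(ctx, docker, runtime.GOOS); err != nil {
		fmt.Println(err)
	}
}
```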
eks-anywhere | aws | Go | package validations_test
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
)
const (
requiredMajorVersion = 20
)
func TestValidateDockerVersion(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
wantErr error
dockerVersion int
}{
{
name:                "FailureDockerVersion19",
dockerVersion: 19,
wantErr: fmt.Errorf("minimum requirements for docker version have not been met. Install Docker version %d.x.x or above", requiredMajorVersion),
},
{
name: "SuccessDockerVersion20",
dockerVersion: 20,
wantErr: nil,
},
{
name: "SuccessDockerVersion22",
dockerVersion: 22,
wantErr: nil,
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
mockCtrl := gomock.NewController(t)
dockerExecutableMock := mocks.NewMockDockerExecutable(mockCtrl)
dockerExecutableMock.EXPECT().Version(ctx).Return(tc.dockerVersion, tc.wantErr)
err := validations.CheckMinimumDockerVersion(ctx, dockerExecutableMock)
if err != tc.wantErr {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
func TestValidateDockerExecutable(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
wantErr error
dockerVersion int
dockerDesktopVersion int
}{
{
name: "SuccessDockerExecutable",
wantErr: nil,
dockerVersion: 21,
},
{
name: "FailureUnderMinDockerVersion",
wantErr: fmt.Errorf("failed to validate docker: minimum requirements for docker version have not been met. Install Docker version 20.x.x or above"),
dockerVersion: 19,
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
mockCtrl := gomock.NewController(t)
dockerExecutableMock := mocks.NewMockDockerExecutable(mockCtrl)
dockerExecutableMock.EXPECT().Version(ctx).Return(tc.dockerVersion, nil).AnyTimes()
dockerExecutableMock.EXPECT().AllocatedMemory(ctx).Return(uint64(6200000001), nil).AnyTimes()
err := validations.ValidateDockerExecutable(ctx, dockerExecutableMock, "linux")
if err != nil && err.Error() != tc.wantErr.Error() {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
| 90 |
eks-anywhere | aws | Go | package validations
import (
"fmt"
"strings"
)
type ValidationError struct {
Errs []string
}
func (v *ValidationError) Error() string {
return fmt.Sprintf("validation failed with %d errors: %s", len(v.Errs), strings.Join(v.Errs[:], ","))
}
func (v *ValidationError) String() string {
return v.Error()
}
| 19 |
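The aggregated message is a simple join of the collected error strings, for example:

```go
package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/validations"
)

func main() {
	err := &validations.ValidationError{Errs: []string{"first", "second"}}
	fmt.Println(err) // validation failed with 2 errors: first,second
}
```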
eks-anywhere | aws | Go | package validations
import (
"errors"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
)
func ValidateAuthenticationForGitProvider(clusterSpec *cluster.Spec, cliConfig *config.CliConfig) error {
if clusterSpec.FluxConfig == nil || clusterSpec.FluxConfig.Spec.Git == nil {
return nil
}
if cliConfig == nil {
return nil
}
if cliConfig.GitPrivateKeyFile == "" {
return errors.New("provide a path to a private key file via the EKSA_GIT_PRIVATE_KEY in order to use the generic git Flux provider")
}
if !FileExistsAndIsNotEmpty(cliConfig.GitPrivateKeyFile) {
return fmt.Errorf("private key file does not exist at %s or is empty", cliConfig.GitPrivateKeyFile)
}
if cliConfig.GitKnownHostsFile == "" {
return errors.New("provide a path to an SSH Known Hosts file which contains a valid entry associate with the given private key via the EKSA_GIT_SSH_KNOWN_HOSTS environment variable")
}
if !FileExistsAndIsNotEmpty(cliConfig.GitKnownHostsFile) {
return fmt.Errorf("SSH known hosts file does not exist at %v or is empty", cliConfig.GitKnownHostsFile)
}
return nil
}
| 38 |
eks-anywhere | aws | Go | package validations_test
import (
"fmt"
"reflect"
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/validations"
)
const (
emptyVar = ""
testEnvVar = "test"
)
func TestValidateGitOpsGitProviderNoAuthForWorkloadCluster(t *testing.T) {
tests := []struct {
name string
wantErr error
git *v1alpha1.GitProviderConfig
cliConfig *config.CliConfig
}{
{
name: "Empty password and private key",
wantErr: fmt.Errorf("provide a path to a private key file via the EKSA_GIT_PRIVATE_KEY in order to use the generic git Flux provider"),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: emptyVar,
GitSshKeyPassphrase: emptyVar,
GitKnownHostsFile: "testdata/git_nonempty_ssh_known_hosts",
},
},
{
name: "cliConfig nil",
wantErr: nil,
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: nil,
},
{
name: "Empty known host",
wantErr: fmt.Errorf("provide a path to an SSH Known Hosts file which contains a valid entry associate with the given private key via the EKSA_GIT_SSH_KNOWN_HOSTS environment variable"),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: "testdata/git_nonempty_private_key",
GitSshKeyPassphrase: testEnvVar,
GitKnownHostsFile: emptyVar,
},
},
{
name: "Empty git config",
wantErr: nil,
git: nil,
cliConfig: nil,
},
{
name: "Empty password",
wantErr: fmt.Errorf("private key file does not exist at %s or is empty", testEnvVar),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: testEnvVar,
GitSshKeyPassphrase: emptyVar,
GitKnownHostsFile: "testdata/git_nonempty_ssh_known_hosts",
},
},
{
name: "Empty private key file",
wantErr: fmt.Errorf("private key file does not exist at %s or is empty", "testdata/git_empty_file"),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: "testdata/git_empty_file",
GitSshKeyPassphrase: emptyVar,
GitKnownHostsFile: "testdata/git_nonempty_ssh_known_hosts",
},
},
{
name: "Empty private key",
wantErr: fmt.Errorf("provide a path to a private key file via the EKSA_GIT_PRIVATE_KEY in order to use the generic git Flux provider"),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: emptyVar,
GitSshKeyPassphrase: testEnvVar,
GitKnownHostsFile: "testdata/git_nonempty_ssh_known_hosts",
},
},
{
name: "Password and private key populated",
wantErr: nil,
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: "testdata/git_nonempty_private_key",
GitSshKeyPassphrase: testEnvVar,
GitKnownHostsFile: "testdata/git_nonempty_ssh_known_hosts",
},
},
{
name: "Empty known hosts",
wantErr: fmt.Errorf("SSH known hosts file does not exist at testdata/git_empty_file or is empty"),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: "testdata/git_nonempty_private_key",
GitSshKeyPassphrase: testEnvVar,
GitKnownHostsFile: "testdata/git_empty_file",
},
},
{
name: "No known hosts",
wantErr: fmt.Errorf("SSH known hosts file does not exist at testdata/git_empty_file or is empty"),
git: &v1alpha1.GitProviderConfig{
RepositoryUrl: "testRepo",
},
cliConfig: &config.CliConfig{
GitPrivateKeyFile: "testdata/git_nonempty_private_key",
GitSshKeyPassphrase: testEnvVar,
GitKnownHostsFile: "testdata/git_empty_file",
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
defaultFlux := &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Git: tc.git,
},
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = "testcluster"
s.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Kind: v1alpha1.GitOpsConfigKind,
Name: "gitopstest",
}
s.FluxConfig = defaultFlux
s.Cluster.SetManagedBy("management-cluster")
})
_, _, cluster, _ := validations.NewKubectl(t)
cluster.Name = "management-cluster"
err := validations.ValidateAuthenticationForGitProvider(clusterSpec, tc.cliConfig)
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
| 166 |
eks-anywhere | aws | Go | package validations
import (
"errors"
"os"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func ValidateClusterNameArg(args []string) (string, error) {
if len(args) == 0 {
return "", errors.New("please specify a cluster name")
}
err := v1alpha1.ValidateClusterName(args[0])
if err != nil {
return args[0], err
}
err = v1alpha1.ValidateClusterNameLength(args[0])
if err != nil {
return args[0], err
}
return args[0], nil
}
func FileExists(filename string) bool {
_, err := os.Stat(filename)
return !os.IsNotExist(err)
}
func FileExistsAndIsNotEmpty(filename string) bool {
info, err := os.Stat(filename)
return err == nil && info.Size() > 0
}
| 34 |
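Both helpers are trivially callable on their own; a quick sketch (the file path is illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/validations"
)

func main() {
	// Returns the first argument along with any name or length validation error.
	name, err := validations.ValidateClusterNameArg([]string{"test-cluster"})
	fmt.Println(name, err) // test-cluster <nil>

	// Reports true for any path that os.Stat can resolve.
	fmt.Println(validations.FileExists("cluster.yaml"))
}
```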
eks-anywhere | aws | Go | package validations_test
import (
"errors"
"path/filepath"
"reflect"
"testing"
"github.com/aws/eks-anywhere/pkg/validations"
)
func TestOldClusterConfigExists(t *testing.T) {
tests := map[string]struct {
Filename string
Expect bool
}{
"Non existence should return false": {
Filename: "nonexistence",
Expect: false,
},
"Empty file should return false": {
Filename: "empty",
Expect: false,
},
"Non empty file should return true": {
Filename: "nonempty",
Expect: true,
},
}
for tn, td := range tests {
t.Run(tn, func(t *testing.T) {
filename := filepath.Join("testdata", td.Filename)
got := validations.FileExistsAndIsNotEmpty(filename)
if td.Expect != got {
t.Errorf("FileExistsAndIsNotEmpty(%v): want = %v; got = %v", filename, td.Expect, got)
}
})
}
}
func TestFileExists(t *testing.T) {
tests := []struct {
name string
filename string
exists bool
}{
{
name: "ExistingFile",
filename: "testdata/testfile",
exists: true,
},
{
name: "NonExistenceFile",
filename: "testdata/testfileNonExisting",
exists: false,
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
got := validations.FileExists(tc.filename)
if tc.exists != got {
t.Errorf("%v got = %v, want %v", tc.name, got, tc.exists)
}
})
}
}
func TestValidateClusterNameArg(t *testing.T) {
tests := []struct {
name string
args []string
expectedError error
expectedArg string
}{
{
name: "Failure Empty Arguments",
args: []string{},
expectedError: errors.New("please specify a cluster name"),
expectedArg: "",
},
{
name: "Success Non-empty Arguments",
args: []string{"test-cluster"},
expectedError: nil,
expectedArg: "test-cluster",
},
{
name: "Failure Cluster Name",
args: []string{"test-cluster@123"},
expectedError: errors.New("test-cluster@123 is not a valid cluster name, cluster names must start with lowercase/uppercase letters and can include numbers and dashes. For instance 'testCluster-123' is a valid name but '123testCluster' is not. "),
expectedArg: "test-cluster@123",
},
{
name: "Failure Cluster Length",
args: []string{"qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345"},
expectedError: errors.New("number of characters in qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345 should be less than 81"),
expectedArg: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345",
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
gotArgs, gotError := validations.ValidateClusterNameArg(tc.args)
if !reflect.DeepEqual(tc.expectedError, gotError) || !reflect.DeepEqual(tc.expectedArg, gotArgs) {
t.Errorf("\n%v got Error = %v, want Error %v", tc.name, gotError, tc.expectedError)
t.Errorf("\n%v got Arguments = %v, want Arguments %v", tc.name, gotArgs, tc.expectedArg)
}
})
}
}
| 114 |
eks-anywhere | aws | Go | package validations
import (
"context"
"testing"
"github.com/golang/mock/gomock"
"k8s.io/apimachinery/pkg/runtime"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/types"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type KubectlClient interface {
List(ctx context.Context, kubeconfig string, list kubernetes.ObjectList) error
ValidateControlPlaneNodes(ctx context.Context, cluster *types.Cluster, clusterName string) error
ValidateWorkerNodes(ctx context.Context, clusterName string, kubeconfig string) error
ValidateNodes(ctx context.Context, kubeconfig string) error
ValidateClustersCRD(ctx context.Context, cluster *types.Cluster) error
ValidateEKSAClustersCRD(ctx context.Context, cluster *types.Cluster) error
Version(ctx context.Context, cluster *types.Cluster) (*executables.VersionResponse, error)
GetClusters(ctx context.Context, cluster *types.Cluster) ([]types.CAPICluster, error)
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
GetBundles(ctx context.Context, kubeconfigFile, name, namespace string) (*releasev1alpha1.Bundles, error)
GetEksaGitOpsConfig(ctx context.Context, gitOpsConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.GitOpsConfig, error)
GetEksaFluxConfig(ctx context.Context, fluxConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.FluxConfig, error)
GetEksaOIDCConfig(ctx context.Context, oidcConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.OIDCConfig, error)
GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error)
GetEksaTinkerbellDatacenterConfig(ctx context.Context, tinkerbellDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.TinkerbellDatacenterConfig, error)
GetEksaTinkerbellMachineConfig(ctx context.Context, tinkerbellMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.TinkerbellMachineConfig, error)
GetEksaAWSIamConfig(ctx context.Context, awsIamConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.AWSIamConfig, error)
SearchIdentityProviderConfig(ctx context.Context, ipName string, kind string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error)
GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error
}
func NewKubectl(t *testing.T) (*executables.Kubectl, context.Context, *types.Cluster, *mockexecutables.MockExecutable) {
kubeconfigFile := "c.kubeconfig"
cluster := &types.Cluster{
KubeconfigFile: kubeconfigFile,
}
ctx := context.Background()
ctrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(ctrl)
return executables.NewKubectl(executable), ctx, cluster, executable
}
| 52 |
eks-anywhere | aws | Go | package validations
// ProcessValidationResults is currently used for unit test processing.
func ProcessValidationResults(validations []Validation) error {
var errs []string
results := make([]ValidationResult, 0, len(validations))
for _, validation := range validations {
results = append(results, *validation())
}
for _, result := range results {
if result.Err != nil {
errs = append(errs, result.Err.Error())
} else if !result.Silent {
result.LogPass()
}
}
if len(errs) > 0 {
return &ValidationError{Errs: errs}
}
return nil
}
| 23 |
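A minimal sketch of feeding results through this helper; the validation names are made up for illustration, and the passing result is marked Silent so the sketch stays free of logger output:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/validations"
)

func main() {
	vs := []validations.Validation{
		func() *validations.ValidationResult {
			return &validations.ValidationResult{Name: "always passes", Silent: true}
		},
		func() *validations.ValidationResult {
			return &validations.ValidationResult{Name: "always fails", Err: errors.New("boom")}
		},
	}

	// Returns a *ValidationError aggregating the failing results.
	if err := validations.ProcessValidationResults(vs); err != nil {
		fmt.Println(err) // validation failed with 1 errors: boom
	}
}
```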
eks-anywhere | aws | Go | package validations
import "errors"
var errRunnerValidation = errors.New("validations failed")
type Validation func() *ValidationResult
type Runner struct {
validations []Validation
}
func NewRunner() *Runner {
return &Runner{validations: make([]Validation, 0)}
}
func (r *Runner) Register(validations ...Validation) {
r.validations = append(r.validations, validations...)
}
func (r *Runner) Run() error {
failed := false
for _, v := range r.validations {
result := v()
result.Report()
if result.Err != nil {
failed = true
}
}
if failed {
return errRunnerValidation
}
return nil
}
| 37 |
eks-anywhere | aws | Go | package validations_test
import (
"errors"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/validations"
)
func TestRunnerRunError(t *testing.T) {
g := NewWithT(t)
r := validations.NewRunner()
r.Register(func() *validations.ValidationResult {
return &validations.ValidationResult{
Err: nil,
}
})
r.Register(func() *validations.ValidationResult {
return &validations.ValidationResult{
Err: errors.New("failed"),
}
})
err := r.Run()
g.Expect(err).NotTo(BeNil())
g.Expect(err.Error()).To(Equal("validations failed"))
}
func TestRunnerRunSuccess(t *testing.T) {
g := NewWithT(t)
r := validations.NewRunner()
r.Register(func() *validations.ValidationResult {
return &validations.ValidationResult{
Err: nil,
}
})
r.Register(func() *validations.ValidationResult {
return &validations.ValidationResult{
Err: nil,
}
})
g.Expect(r.Run()).To(Succeed())
}
| 47 |
eks-anywhere | aws | Go | package validations
type TlsValidator interface {
ValidateCert(host, port, caCertContent string) error
IsSignedByUnknownAuthority(host, port string) (bool, error)
}
| 7 |
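The interface is small enough that callers can supply a trivial stub instead of the crypto package's real validator; `noopTLS` here is hypothetical:

```go
package main

import "github.com/aws/eks-anywhere/pkg/validations"

// noopTLS is a hypothetical TlsValidator that accepts every certificate.
type noopTLS struct{}

func (noopTLS) ValidateCert(host, port, caCertContent string) error        { return nil }
func (noopTLS) IsSignedByUnknownAuthority(host, port string) (bool, error) { return false, nil }

// Compile-time check that noopTLS satisfies the interface.
var _ validations.TlsValidator = noopTLS{}

func main() {}
```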
eks-anywhere | aws | Go | package validations
import (
"unicode"
"github.com/aws/eks-anywhere/pkg/logger"
)
type ValidationResult struct {
Name string
Err error
Remediation string
Silent bool
}
func (v *ValidationResult) Report() {
if v.Err != nil {
logger.MarkFail("Validation failed", "validation", v.Name, "error", v.Err, "remediation", v.Remediation)
return
}
if !v.Silent {
v.LogPass()
}
}
func (v *ValidationResult) LogPass() {
logger.MarkPass(capitalize(v.Name))
}
func capitalize(s string) string {
if len(s) == 0 {
return s
}
runes := []rune(s)
runes[0] = unicode.ToUpper(runes[0])
return string(runes)
}
| 39 |
eks-anywhere | aws | Go | package validations
import (
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
)
type Opts struct {
Kubectl KubectlClient
Spec *cluster.Spec
WorkloadCluster *types.Cluster
ManagementCluster *types.Cluster
Provider providers.Provider
TLSValidator TlsValidator
CliConfig *config.CliConfig
SkippedValidations map[string]bool
}
func (o *Opts) SetDefaults() {
if o.TLSValidator == nil {
o.TLSValidator = crypto.NewTlsValidator()
}
}
| 27 |
eks-anywhere | aws | Go | package createcluster
import (
"context"
"fmt"
"runtime"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/gitops/flux"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/validations"
)
type ValidationManager struct {
clusterSpec *cluster.Spec
provider providers.Provider
gitOpsFlux *flux.Flux
createValidations Validator
dockerExec validations.DockerExecutable
}
type Validator interface {
PreflightValidations(ctx context.Context) []validations.Validation
}
func NewValidations(clusterSpec *cluster.Spec, provider providers.Provider, gitOpsFlux *flux.Flux, createValidations Validator, dockerExec validations.DockerExecutable) *ValidationManager {
return &ValidationManager{
clusterSpec: clusterSpec,
provider: provider,
gitOpsFlux: gitOpsFlux,
createValidations: createValidations,
dockerExec: dockerExec,
}
}
func (v *ValidationManager) Validate(ctx context.Context) error {
runner := validations.NewRunner()
runner.Register(v.generateCreateValidations(ctx)...)
runner.Register(v.gitOpsFlux.Validations(ctx, v.clusterSpec)...)
err := runner.Run()
return err
}
func (v *ValidationManager) generateCreateValidations(ctx context.Context) []validations.Validation {
vs := []validations.Validation{
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate docker executable",
Err: validations.ValidateDockerExecutable(ctx, v.dockerExec, runtime.GOOS),
Silent: true,
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate kubeconfig path",
Err: kubeconfig.ValidateKubeconfigPath(v.clusterSpec.Cluster.Name),
Silent: true,
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate cluster",
Err: cluster.ValidateConfig(v.clusterSpec.Config),
Silent: true,
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: fmt.Sprintf("validate %s Provider", v.provider.Name()),
Err: v.provider.SetupAndValidateCreateCluster(ctx, v.clusterSpec),
}
},
}
vs = append(vs, v.createValidations.PreflightValidations(ctx)...)
return vs
}
| 81 |
eks-anywhere | aws | Go | package createcluster_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/gitops/flux"
providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/createcluster"
createmocks "github.com/aws/eks-anywhere/pkg/validations/createcluster/mocks"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
)
type createClusterValidationTest struct {
clusterSpec *cluster.Spec
ctx context.Context
kubectl *mocks.MockKubectlClient
provider *providermocks.MockProvider
flux *flux.Flux
docker *mocks.MockDockerExecutable
createValidations *createmocks.MockValidator
}
func newValidateTest(t *testing.T) *createClusterValidationTest {
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockKubectlClient(mockCtrl)
provider := providermocks.NewMockProvider(mockCtrl)
docker := mocks.NewMockDockerExecutable(mockCtrl)
createValidations := createmocks.NewMockValidator(mockCtrl)
return &createClusterValidationTest{
ctx: context.Background(),
kubectl: kubectl,
provider: provider,
docker: docker,
createValidations: createValidations,
}
}
func (c *createClusterValidationTest) expectValidDockerClusterSpec() {
s := test.NewClusterSpec()
s.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: v1alpha1.ClusterSpec{
ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{},
},
Pods: v1alpha1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: v1alpha1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 1,
},
DatacenterRef: v1alpha1.Ref{
Kind: "DockerDatacenterConfig",
Name: "eksa-unit-test",
},
ExternalEtcdConfiguration: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
},
KubernetesVersion: v1alpha1.GetClusterDefaultKubernetesVersion(),
ManagementCluster: v1alpha1.ManagementCluster{
Name: "dev-cluster",
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(1),
}},
},
}
s.DockerDatacenter = &v1alpha1.DockerDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: v1alpha1.DockerDatacenterConfigSpec{},
}
c.clusterSpec = s
}
func (c *createClusterValidationTest) expectValidProvider() {
c.provider.EXPECT().SetupAndValidateCreateCluster(c.ctx, c.clusterSpec).Return(nil).AnyTimes()
c.provider.EXPECT().Name().Return("docker").AnyTimes()
}
func (c *createClusterValidationTest) expectValidDockerExec() {
c.docker.EXPECT().Version(c.ctx).Return(21, nil).AnyTimes()
c.docker.EXPECT().AllocatedMemory(c.ctx).Return(uint64(6200000001), nil).AnyTimes()
}
func (c *createClusterValidationTest) expectEmptyFlux() {
c.flux = flux.NewFluxFromGitOpsFluxClient(nil, nil, nil, nil)
}
type validation struct {
run bool
}
func (c *createClusterValidationTest) expectBuildValidations() *validation {
v := &validation{}
c.createValidations.EXPECT().PreflightValidations(c.ctx).Return(
[]validations.Validation{
func() *validations.ValidationResult {
v.run = true
return &validations.ValidationResult{
Err: nil,
}
},
},
)
return v
}
func TestCreateClusterValidationsSuccess(t *testing.T) {
g := NewWithT(t)
test := newValidateTest(t)
test.expectValidDockerClusterSpec()
test.expectValidProvider()
test.expectEmptyFlux()
test.expectValidDockerExec()
validationFromBuild := test.expectBuildValidations()
commandVal := createcluster.NewValidations(test.clusterSpec, test.provider, test.flux, test.createValidations, test.docker)
g.Expect(commandVal.Validate(test.ctx)).To(Succeed())
g.Expect(validationFromBuild.run).To(BeTrue(), "validation coming from BuildValidations should be run")
}
| 143 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/validations/createcluster/createcluster.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
validations "github.com/aws/eks-anywhere/pkg/validations"
gomock "github.com/golang/mock/gomock"
)
// MockValidator is a mock of Validator interface.
type MockValidator struct {
ctrl *gomock.Controller
recorder *MockValidatorMockRecorder
}
// MockValidatorMockRecorder is the mock recorder for MockValidator.
type MockValidatorMockRecorder struct {
mock *MockValidator
}
// NewMockValidator creates a new mock instance.
func NewMockValidator(ctrl *gomock.Controller) *MockValidator {
mock := &MockValidator{ctrl: ctrl}
mock.recorder = &MockValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockValidator) EXPECT() *MockValidatorMockRecorder {
return m.recorder
}
// PreflightValidations mocks base method.
func (m *MockValidator) PreflightValidations(ctx context.Context) []validations.Validation {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PreflightValidations", ctx)
ret0, _ := ret[0].([]validations.Validation)
return ret0
}
// PreflightValidations indicates an expected call of PreflightValidations.
func (mr *MockValidatorMockRecorder) PreflightValidations(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreflightValidations", reflect.TypeOf((*MockValidator)(nil).PreflightValidations), ctx)
}
| 51 |
eks-anywhere | aws | Go | package createvalidations
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
func ValidateClusterNameIsUnique(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, clusterName string) error {
c, err := k.GetClusters(ctx, cluster)
if err != nil {
return err
}
for _, capiCluster := range c {
if capiCluster.Metadata.Name == clusterName {
return fmt.Errorf("cluster name %s already exists", cluster.Name)
}
}
return nil
}
func ValidateManagementCluster(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster) error {
if err := k.ValidateClustersCRD(ctx, cluster); err != nil {
return err
}
return k.ValidateEKSAClustersCRD(ctx, cluster)
}
| 30 |
eks-anywhere | aws | Go | package createvalidations_test
import (
"bytes"
"errors"
"fmt"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
)
const testclustername string = "testcluster"
type UnAuthKubectlClient struct {
*executables.Kubectl
*kubernetes.UnAuthClient
}
func TestValidateClusterPresent(t *testing.T) {
tests := []struct {
name string
wantErr error
upgradeVersion v1alpha1.KubernetesVersion
getClusterResponse string
}{
{
name: "SuccessNoClusters",
wantErr: nil,
getClusterResponse: "testdata/empty_get_cluster_response.json",
},
{
name: "FailureClusterNameExists",
wantErr: errors.New("cluster name testcluster already exists"),
getClusterResponse: "testdata/cluster_name_exists.json",
},
{
name: "SuccessClusterNotInList",
wantErr: nil,
getClusterResponse: "testdata/name_not_in_list.json",
},
}
k, ctx, cluster, e := validations.NewKubectl(t)
uk := kubernetes.NewUnAuthClient(k)
cluster.Name = testclustername
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
fileContent := test.ReadFile(t, tc.getClusterResponse)
e.EXPECT().Execute(ctx, []string{"get", capiClustersResourceType, "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}).Return(*bytes.NewBufferString(fileContent), nil)
err := createvalidations.ValidateClusterNameIsUnique(ctx, UnAuthKubectlClient{k, uk}, cluster, testclustername)
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
func TestValidateManagementClusterCRDs(t *testing.T) {
tests := []struct {
name string
wantErr bool
errGetClusterCRD error
errGetClusterCRDCount int
errGetEKSAClusterCRD error
errGetEKSAClusterCRDCount int
}{
{
name: "Success",
wantErr: false,
errGetClusterCRD: nil,
errGetClusterCRDCount: 1,
errGetEKSAClusterCRD: nil,
errGetEKSAClusterCRDCount: 1,
},
{
name: "FailureClusterCRDDoesNotExist",
wantErr: true,
errGetClusterCRD: errors.New("cluster CRD does not exist"),
errGetClusterCRDCount: 1,
errGetEKSAClusterCRD: nil,
errGetEKSAClusterCRDCount: 0,
},
{
name: "FailureEKSAClusterCRDDoesNotExist",
wantErr: true,
errGetClusterCRD: nil,
errGetClusterCRDCount: 1,
errGetEKSAClusterCRD: errors.New("eksa cluster CRDS do not exist"),
errGetEKSAClusterCRDCount: 1,
},
}
k, ctx, cluster, e := validations.NewKubectl(t)
uk := kubernetes.NewUnAuthClient(k)
cluster.Name = testclustername
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
e.EXPECT().Execute(ctx, []string{"get", "customresourcedefinition", capiClustersResourceType, "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, tc.errGetClusterCRD).Times(tc.errGetClusterCRDCount)
e.EXPECT().Execute(ctx, []string{"get", "customresourcedefinition", eksaClusterResourceType, "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, tc.errGetEKSAClusterCRD).Times(tc.errGetEKSAClusterCRDCount)
err := createvalidations.ValidateManagementCluster(ctx, UnAuthKubectlClient{k, uk}, cluster)
if tc.wantErr {
assert.Error(tt, err, "expected ValidateManagementCluster to return an error", "test", tc.name)
} else {
assert.NoError(tt, err, "expected ValidateManagementCluster not to return an error", "test", tc.name)
}
})
}
}
var (
capiClustersResourceType = fmt.Sprintf("clusters.%s", clusterv1.GroupVersion.Group)
eksaClusterResourceType = fmt.Sprintf("clusters.%s", v1alpha1.GroupVersion.Group)
)
| 124 |
eks-anywhere | aws | Go | package createvalidations
import (
"github.com/aws/eks-anywhere/pkg/validations"
)
func New(opts *validations.Opts) *CreateValidations {
opts.SetDefaults()
return &CreateValidations{Opts: opts}
}
type CreateValidations struct {
Opts *validations.Opts
}
| 15 |
eks-anywhere | aws | Go | package createvalidations
import (
"context"
"errors"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
var (
clusterResourceType = fmt.Sprintf("clusters.%s", v1alpha1.GroupVersion.Group)
fluxConfigResourceType = fmt.Sprintf("fluxconfigs.%s", v1alpha1.GroupVersion.Group)
gitOpsConfigResourceType = fmt.Sprintf("gitopsconfigs.%s", v1alpha1.GroupVersion.Group)
)
func ValidateGitOps(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if err := validateGitOpsConfig(ctx, k, cluster, clusterSpec); err != nil {
return fmt.Errorf("invalid gitOpsConfig: %v", err)
}
if err := validateFluxConfig(ctx, k, cluster, clusterSpec); err != nil {
return fmt.Errorf("invalid fluxConfig: %v", err)
}
return nil
}
// validateGitOpsConfig method will be removed in a future release since gitOpsConfig is deprecated in favor of fluxConfig.
func validateGitOpsConfig(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if clusterSpec.GitOpsConfig == nil {
return nil
}
gitOpsConfig := &v1alpha1.GitOpsConfig{}
err := k.GetObject(ctx, gitOpsConfigResourceType, clusterSpec.GitOpsConfig.Name, clusterSpec.Cluster.Namespace, cluster.KubeconfigFile, gitOpsConfig)
if err == nil {
return fmt.Errorf("gitOpsConfig %s already exists", clusterSpec.Cluster.Spec.GitOpsRef.Name)
}
if !apierrors.IsNotFound(err) {
return fmt.Errorf("fetching gitOpsConfig in cluster: %v", err)
}
mgmtCluster := &v1alpha1.Cluster{}
if err := k.GetObject(ctx, clusterResourceType, clusterSpec.Cluster.ManagedBy(), clusterSpec.Cluster.Namespace, cluster.KubeconfigFile, mgmtCluster); err != nil {
return err
}
mgmtGitOpsConfig := &v1alpha1.GitOpsConfig{}
if err := k.GetObject(ctx, gitOpsConfigResourceType, mgmtCluster.Spec.GitOpsRef.Name, clusterSpec.Cluster.Namespace, cluster.KubeconfigFile, mgmtGitOpsConfig); err != nil {
return err
}
if !mgmtGitOpsConfig.Spec.Equal(&clusterSpec.GitOpsConfig.Spec) {
return errors.New("expected gitOpsConfig.spec to be the same between management and its workload clusters")
}
return nil
}
func validateFluxConfig(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if clusterSpec.FluxConfig == nil {
return nil
}
// When processing a deprecated gitopsConfig, we parse and convert it to a fluxConfig.
// In that case both fluxConfig and gitopsConfig can exist in the spec, so skip the fluxConfig validation.
if clusterSpec.GitOpsConfig != nil {
return nil
}
fluxConfig := &v1alpha1.FluxConfig{}
err := k.GetObject(ctx, fluxConfigResourceType, clusterSpec.FluxConfig.Name, clusterSpec.Cluster.Namespace, cluster.KubeconfigFile, fluxConfig)
if err == nil {
return fmt.Errorf("fluxConfig %s already exists", clusterSpec.Cluster.Spec.GitOpsRef.Name)
}
if !apierrors.IsNotFound(err) {
return fmt.Errorf("fetching fluxConfig in cluster: %v", err)
}
mgmtCluster := &v1alpha1.Cluster{}
if err := k.GetObject(ctx, clusterResourceType, clusterSpec.Cluster.ManagedBy(), clusterSpec.Cluster.Namespace, cluster.KubeconfigFile, mgmtCluster); err != nil {
return err
}
mgmtFluxConfig := &v1alpha1.FluxConfig{}
if err := k.GetObject(ctx, fluxConfigResourceType, mgmtCluster.Spec.GitOpsRef.Name, clusterSpec.Cluster.Namespace, cluster.KubeconfigFile, mgmtFluxConfig); err != nil {
return err
}
if !mgmtFluxConfig.Spec.Equal(&clusterSpec.FluxConfig.Spec) {
return errors.New("expected fluxConfig.spec to be the same between management and its workload clusters")
}
return nil
}
| 102 |
eks-anywhere | aws | Go | package createvalidations_test
import (
"context"
"errors"
"testing"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
)
func TestValidateGitOpsConfigFluxConfigBothNil(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(Succeed())
}
func TestValidateGitOpsConfigGetGitOpsConfigError(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.GitOpsConfig = &v1alpha1.GitOpsConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(errors.New("error get gitOpsConfig"))
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("error get gitOpsConfig")))
}
func TestValidateGitOpsConfigNameExists(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.GitOpsConfig = &v1alpha1.GitOpsConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(nil)
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("gitOpsConfig gitops already exists")))
}
func TestValidateGitOpsConfigGetClusterError(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.GitOpsConfig = &v1alpha1.GitOpsConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).Return(errors.New("error get cluster"))
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("error get cluster")))
}
func TestValidateGitOpsConfigGetMgmtGitOpsConfigError(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.GitOpsConfig = &v1alpha1.GitOpsConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().
GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.Cluster) error {
obj.Spec = v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "gitops",
},
}
return nil
})
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "gitops", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(errors.New("error get gitops"))
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("error get gitops")))
}
func TestValidateGitOpsConfigNotEqual(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.GitOpsConfig = &v1alpha1.GitOpsConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().
GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.Cluster) error {
obj.Spec = v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "gitops",
},
}
return nil
})
tt.k.EXPECT().
GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "gitops", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.GitOpsConfig) error {
obj.Spec = v1alpha1.GitOpsConfigSpec{
Flux: v1alpha1.Flux{
Github: v1alpha1.Github{
FluxSystemNamespace: "custom",
},
},
}
return nil
})
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("expected gitOpsConfig.spec to be the same")))
}
func TestValidateGitOpsConfigSuccess(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.c.Opts.Spec.GitOpsConfig = &v1alpha1.GitOpsConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().
GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.Cluster) error {
obj.Spec = v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "gitops",
},
}
return nil
})
tt.k.EXPECT().GetObject(tt.ctx, "gitopsconfigs.anywhere.eks.amazonaws.com", "gitops", "", "kubeconfig", &v1alpha1.GitOpsConfig{}).Return(nil)
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(Succeed())
}
func TestValidateFluxConfigGetFluxConfigError(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(errors.New("error get fluxConfig"))
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("error get fluxConfig")))
}
func TestValidateFluxConfigNameExists(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(nil)
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("fluxConfig gitops already exists")))
}
func TestValidateFluxConfigGetClusterError(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).Return(errors.New("error get cluster"))
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("error get cluster")))
}
func TestValidateFluxConfigGetMgmtFluxConfigError(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().
GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.Cluster) error {
obj.Spec = v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "gitops",
},
}
return nil
})
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "gitops", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(errors.New("error get gitops"))
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("error get gitops")))
}
func TestValidateFluxConfigNotEqual(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().
GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.Cluster) error {
obj.Spec = v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "gitops",
},
}
return nil
})
tt.k.EXPECT().
GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "gitops", "", "kubeconfig", &v1alpha1.FluxConfig{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.FluxConfig) error {
obj.Spec = v1alpha1.FluxConfigSpec{
SystemNamespace: "custom",
}
return nil
})
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(MatchError(ContainSubstring("expected fluxConfig.spec to be the same")))
}
func TestValidateFluxConfigSuccess(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.c.Opts.Spec.FluxConfig = &v1alpha1.FluxConfig{}
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
tt.k.EXPECT().
GetObject(tt.ctx, "clusters.anywhere.eks.amazonaws.com", "", "", "kubeconfig", &v1alpha1.Cluster{}).
DoAndReturn(func(_ context.Context, _, _, _, _ string, obj *v1alpha1.Cluster) error {
obj.Spec = v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "gitops",
},
}
return nil
})
tt.k.EXPECT().GetObject(tt.ctx, "fluxconfigs.anywhere.eks.amazonaws.com", "gitops", "", "kubeconfig", &v1alpha1.FluxConfig{}).Return(nil)
tt.Expect(createvalidations.ValidateGitOps(tt.ctx, tt.k, tt.c.Opts.ManagementCluster, tt.c.Opts.Spec)).To(Succeed())
}
| 191 |
eks-anywhere | aws | Go | package createvalidations
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
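// ValidateIdentityProviderNameIsUnique validates that the identity provider names referenced
// by a workload cluster are not already in use; it is a no-op for self-managed clusters or
// when no identity providers are referenced.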
func ValidateIdentityProviderNameIsUnique(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, spec *cluster.Spec) error {
if len(spec.Cluster.Spec.IdentityProviderRefs) == 0 || spec.Cluster.IsSelfManaged() {
logger.V(5).Info("skipping ValidateIdentityProviderNameIsUnique")
return nil
}
var existingIR []string
for _, ir := range spec.Cluster.Spec.IdentityProviderRefs {
eIR, err := k.SearchIdentityProviderConfig(ctx, ir.Name, ir.Kind, cluster.KubeconfigFile, spec.Cluster.Namespace)
if err != nil {
return err
}
if len(eIR) > 0 {
existingIR = append(existingIR, eIR[0].Name)
}
}
if len(existingIR) > 0 {
return fmt.Errorf("the following identityProviders already exists %s", existingIR)
}
return nil
}
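// Sketch of the underlying lookup (illustrative; the literal values are
// assumptions): SearchIdentityProviderConfig returns any objects whose
// metadata.name matches the ref name, so a non-empty result means the name is
// already taken.
//
//	matches, err := k.SearchIdentityProviderConfig(ctx, "oidc-config-test", v1alpha1.OIDCConfigKind, cluster.KubeconfigFile, spec.Cluster.Namespace)
//	if err == nil && len(matches) > 0 {
//		// name collision: reject the new cluster spec
//	}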
| 35 |
eks-anywhere | aws | Go | package createvalidations_test
import (
"bytes"
"errors"
"fmt"
"reflect"
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
)
var oidcResourceType = fmt.Sprintf("oidcconfigs.%s", v1alpha1.GroupVersion.Group)
func TestValidateIdentityProviderForWorkloadClusters(t *testing.T) {
tests := []struct {
name string
wantErr error
upgradeVersion v1alpha1.KubernetesVersion
getClusterResponse string
}{
{
name: "SuccessNoIdentityProvider",
wantErr: nil,
getClusterResponse: "testdata/empty_get_identity_provider_response.json",
},
{
name: "FailureIdentityProviderNameExists",
wantErr:            errors.New("the following identityProviders already exist [oidc-config-test]"),
getClusterResponse: "testdata/identity_provider_name_exists.json",
},
}
defaultOIDC := &v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "client-id",
GroupsClaim: "groups-claim",
GroupsPrefix: "groups-prefix",
IssuerUrl: "issuer-url",
RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{{
Claim: "claim",
Value: "value",
}},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "oidc-config-test",
},
}
s.Cluster.SetManagedBy("management-cluster")
s.OIDCConfig = defaultOIDC
})
k, ctx, cluster, e := validations.NewKubectl(t)
uk := kubernetes.NewUnAuthClient(k)
cluster.Name = testclustername
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
fileContent := test.ReadFile(t, tc.getClusterResponse)
e.EXPECT().Execute(
ctx, []string{
"get", oidcResourceType, "-o", "json", "--kubeconfig",
cluster.KubeconfigFile, "--namespace", clusterSpec.Cluster.Namespace,
"--field-selector=metadata.name=oidc-config-test",
}).Return(*bytes.NewBufferString(fileContent), nil)
err := createvalidations.ValidateIdentityProviderNameIsUnique(ctx, UnAuthKubectlClient{k, uk}, cluster, clusterSpec)
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
func TestValidateIdentityProviderForSelfManagedCluster(t *testing.T) {
tests := []struct {
name string
wantErr error
upgradeVersion v1alpha1.KubernetesVersion
getClusterResponse string
}{
{
name:               "SkipValidationForSelfManagedCluster",
wantErr: nil,
getClusterResponse: "testdata/empty_get_identity_provider_response.json",
},
}
defaultOIDC := &v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "client-id",
GroupsClaim: "groups-claim",
GroupsPrefix: "groups-prefix",
IssuerUrl: "issuer-url",
RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{{
Claim: "claim",
Value: "value",
}},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "oidc-config-test",
},
}
s.OIDCConfig = defaultOIDC
s.Cluster.SetSelfManaged()
})
k, ctx, cluster, e := validations.NewKubectl(t)
uk := kubernetes.NewUnAuthClient(k)
cluster.Name = testclustername
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
e.EXPECT().Execute(
ctx, []string{
"get", oidcResourceType, "-o", "json", "--kubeconfig",
cluster.KubeconfigFile, "--namespace", clusterSpec.Cluster.Namespace,
"--field-selector=metadata.name=oidc-config-test",
}).Times(0)
err := createvalidations.ValidateIdentityProviderNameIsUnique(ctx, UnAuthKubectlClient{k, uk}, cluster, clusterSpec)
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
| 146 |
eks-anywhere | aws | Go | package createvalidations
import (
"context"
"fmt"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
// PreflightValidations returns the validations required before creating a cluster.
func (v *CreateValidations) PreflightValidations(ctx context.Context) []validations.Validation {
k := v.Opts.Kubectl
targetCluster := &types.Cluster{
Name: v.Opts.WorkloadCluster.Name,
KubeconfigFile: v.Opts.ManagementCluster.KubeconfigFile,
}
createValidations := []validations.Validation{
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate OS is compatible with registry mirror configuration",
Remediation: "please use a valid OS for your registry mirror configuration",
Err: validations.ValidateOSForRegistryMirror(v.Opts.Spec, v.Opts.Provider),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate certificate for registry mirror",
Remediation: fmt.Sprintf("provide a valid certificate for you registry endpoint using %s env var", anywherev1.RegistryMirrorCAKey),
Err: validations.ValidateCertForRegistryMirror(v.Opts.Spec, v.Opts.TLSValidator),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate authentication for git provider",
Remediation: fmt.Sprintf("ensure %s, %s env variable are set and valid", config.EksaGitPrivateKeyTokenEnv, config.EksaGitKnownHostsFileEnv),
Err: validations.ValidateAuthenticationForGitProvider(v.Opts.Spec, v.Opts.CliConfig),
}
},
}
if v.Opts.Spec.Cluster.IsManaged() {
createValidations = append(
createValidations,
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate cluster name",
Remediation: "",
Err: ValidateClusterNameIsUnique(ctx, k, targetCluster, v.Opts.Spec.Cluster.Name),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate gitops",
Remediation: "",
Err: ValidateGitOps(ctx, k, v.Opts.ManagementCluster, v.Opts.Spec),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate identity providers' name",
Remediation: "",
Err: ValidateIdentityProviderNameIsUnique(ctx, k, targetCluster, v.Opts.Spec),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate management cluster has eksa crds",
Remediation: "",
Err: ValidateManagementCluster(ctx, k, targetCluster),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate management cluster name is valid",
Remediation: "Specify a valid management cluster in the cluster spec. This cannot be a workload cluster that is managed by a different " +
"management cluster.",
Err: validations.ValidateManagementClusterName(ctx, k, v.Opts.ManagementCluster, v.Opts.Spec.Cluster.Spec.ManagementCluster.Name),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate management cluster bundle version compatibility",
Remediation: fmt.Sprintf("upgrade management cluster %s before creating workload cluster %s", v.Opts.Spec.Cluster.ManagedBy(), v.Opts.WorkloadCluster.Name),
Err: validations.ValidateManagementClusterBundlesVersion(ctx, k, v.Opts.ManagementCluster, v.Opts.Spec),
}
},
)
}
return createValidations
}
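// Illustrative consumption sketch (a hand-rolled equivalent of
// validations.ProcessValidationResults, which the tests use; names assumed):
// each entry is a thunk producing a ValidationResult, so callers can run the
// checks lazily and aggregate failures together with their remediation hints.
//
//	for _, validate := range v.PreflightValidations(ctx) {
//		if r := validate(); r.Err != nil {
//			fmt.Printf("validation %q failed: %v (remediation: %s)\n", r.Name, r.Err, r.Remediation)
//		}
//	}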
| 97 |
eks-anywhere | aws | Go | package createvalidations_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type preflightValidationsTest struct {
*WithT
ctx context.Context
k *mocks.MockKubectlClient
c *createvalidations.CreateValidations
}
func newPreflightValidationsTest(t *testing.T) *preflightValidationsTest {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(ctrl)
c := &types.Cluster{
KubeconfigFile: "kubeconfig",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Name: "gitops",
}
})
opts := &validations.Opts{
Kubectl: k,
Spec: clusterSpec,
WorkloadCluster: c,
ManagementCluster: c,
}
return &preflightValidationsTest{
WithT: NewWithT(t),
ctx: context.Background(),
k: k,
c: createvalidations.New(opts),
}
}
func TestPreFlightValidationsGitProvider(t *testing.T) {
tt := newPreflightValidationsTest(t)
tt.Expect(validations.ProcessValidationResults(tt.c.PreflightValidations(tt.ctx))).To(Succeed())
}
func TestPreFlightValidationsWorkloadCluster(t *testing.T) {
tt := newPreflightValidationsTest(t)
mgmtClusterName := "mgmt-cluster"
tt.c.Opts.Spec.Cluster.SetManagedBy(mgmtClusterName)
tt.c.Opts.Spec.Cluster.Spec.ManagementCluster.Name = mgmtClusterName
tt.c.Opts.ManagementCluster.Name = mgmtClusterName
mgmt := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mgmt-cluster",
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster",
},
BundlesRef: &anywherev1.BundlesRef{
Name: "bundles-29",
Namespace: constants.EksaSystemNamespace,
},
},
}
mgmtBundle := &releasev1alpha1.Bundles{
Spec: releasev1alpha1.BundlesSpec{
Number: tt.c.Opts.Spec.Bundles.Spec.Number + 1,
},
}
tt.k.EXPECT().GetClusters(tt.ctx, tt.c.Opts.WorkloadCluster).Return(nil, nil)
tt.k.EXPECT().ValidateClustersCRD(tt.ctx, tt.c.Opts.WorkloadCluster).Return(nil)
tt.k.EXPECT().ValidateEKSAClustersCRD(tt.ctx, tt.c.Opts.WorkloadCluster).Return(nil)
tt.k.EXPECT().GetEksaCluster(tt.ctx, tt.c.Opts.ManagementCluster, mgmtClusterName).Return(mgmt, nil)
tt.k.EXPECT().GetEksaCluster(tt.ctx, tt.c.Opts.ManagementCluster, mgmtClusterName).Return(mgmt, nil)
tt.k.EXPECT().GetBundles(tt.ctx, tt.c.Opts.ManagementCluster.KubeconfigFile, mgmt.Spec.BundlesRef.Name, mgmt.Spec.BundlesRef.Namespace).Return(mgmtBundle, nil)
tt.Expect(validations.ProcessValidationResults(tt.c.PreflightValidations(tt.ctx))).To(Succeed())
}
| 97 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/validations (interfaces: DockerExecutable)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockDockerExecutable is a mock of DockerExecutable interface.
type MockDockerExecutable struct {
ctrl *gomock.Controller
recorder *MockDockerExecutableMockRecorder
}
// MockDockerExecutableMockRecorder is the mock recorder for MockDockerExecutable.
type MockDockerExecutableMockRecorder struct {
mock *MockDockerExecutable
}
// NewMockDockerExecutable creates a new mock instance.
func NewMockDockerExecutable(ctrl *gomock.Controller) *MockDockerExecutable {
mock := &MockDockerExecutable{ctrl: ctrl}
mock.recorder = &MockDockerExecutableMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDockerExecutable) EXPECT() *MockDockerExecutableMockRecorder {
return m.recorder
}
// AllocatedMemory mocks base method.
func (m *MockDockerExecutable) AllocatedMemory(arg0 context.Context) (uint64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AllocatedMemory", arg0)
ret0, _ := ret[0].(uint64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AllocatedMemory indicates an expected call of AllocatedMemory.
func (mr *MockDockerExecutableMockRecorder) AllocatedMemory(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllocatedMemory", reflect.TypeOf((*MockDockerExecutable)(nil).AllocatedMemory), arg0)
}
// Version mocks base method.
func (m *MockDockerExecutable) Version(arg0 context.Context) (int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Version", arg0)
ret0, _ := ret[0].(int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Version indicates an expected call of Version.
func (mr *MockDockerExecutableMockRecorder) Version(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockDockerExecutable)(nil).Version), arg0)
}
| 66 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/validations/kubectl.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
executables "github.com/aws/eks-anywhere/pkg/executables"
types "github.com/aws/eks-anywhere/pkg/types"
v1alpha10 "github.com/aws/eks-anywhere/release/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// MockKubectlClient is a mock of KubectlClient interface.
type MockKubectlClient struct {
ctrl *gomock.Controller
recorder *MockKubectlClientMockRecorder
}
// MockKubectlClientMockRecorder is the mock recorder for MockKubectlClient.
type MockKubectlClientMockRecorder struct {
mock *MockKubectlClient
}
// NewMockKubectlClient creates a new mock instance.
func NewMockKubectlClient(ctrl *gomock.Controller) *MockKubectlClient {
mock := &MockKubectlClient{ctrl: ctrl}
mock.recorder = &MockKubectlClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubectlClient) EXPECT() *MockKubectlClientMockRecorder {
return m.recorder
}
// GetBundles mocks base method.
func (m *MockKubectlClient) GetBundles(ctx context.Context, kubeconfigFile, name, namespace string) (*v1alpha10.Bundles, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBundles", ctx, kubeconfigFile, name, namespace)
ret0, _ := ret[0].(*v1alpha10.Bundles)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetBundles indicates an expected call of GetBundles.
func (mr *MockKubectlClientMockRecorder) GetBundles(ctx, kubeconfigFile, name, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBundles", reflect.TypeOf((*MockKubectlClient)(nil).GetBundles), ctx, kubeconfigFile, name, namespace)
}
// GetClusters mocks base method.
func (m *MockKubectlClient) GetClusters(ctx context.Context, cluster *types.Cluster) ([]types.CAPICluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClusters", ctx, cluster)
ret0, _ := ret[0].([]types.CAPICluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetClusters indicates an expected call of GetClusters.
func (mr *MockKubectlClientMockRecorder) GetClusters(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusters", reflect.TypeOf((*MockKubectlClient)(nil).GetClusters), ctx, cluster)
}
// GetEksaAWSIamConfig mocks base method.
func (m *MockKubectlClient) GetEksaAWSIamConfig(ctx context.Context, awsIamConfigName, kubeconfigFile, namespace string) (*v1alpha1.AWSIamConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaAWSIamConfig", ctx, awsIamConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.AWSIamConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaAWSIamConfig indicates an expected call of GetEksaAWSIamConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaAWSIamConfig(ctx, awsIamConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaAWSIamConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaAWSIamConfig), ctx, awsIamConfigName, kubeconfigFile, namespace)
}
// GetEksaCluster mocks base method.
func (m *MockKubectlClient) GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaCluster", ctx, cluster, clusterName)
ret0, _ := ret[0].(*v1alpha1.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaCluster indicates an expected call of GetEksaCluster.
func (mr *MockKubectlClientMockRecorder) GetEksaCluster(ctx, cluster, clusterName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaCluster), ctx, cluster, clusterName)
}
// GetEksaFluxConfig mocks base method.
func (m *MockKubectlClient) GetEksaFluxConfig(ctx context.Context, fluxConfigName, kubeconfigFile, namespace string) (*v1alpha1.FluxConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaFluxConfig", ctx, fluxConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.FluxConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaFluxConfig indicates an expected call of GetEksaFluxConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaFluxConfig(ctx, fluxConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaFluxConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaFluxConfig), ctx, fluxConfigName, kubeconfigFile, namespace)
}
// GetEksaGitOpsConfig mocks base method.
func (m *MockKubectlClient) GetEksaGitOpsConfig(ctx context.Context, gitOpsConfigName, kubeconfigFile, namespace string) (*v1alpha1.GitOpsConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaGitOpsConfig", ctx, gitOpsConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.GitOpsConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaGitOpsConfig indicates an expected call of GetEksaGitOpsConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaGitOpsConfig(ctx, gitOpsConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaGitOpsConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaGitOpsConfig), ctx, gitOpsConfigName, kubeconfigFile, namespace)
}
// GetEksaOIDCConfig mocks base method.
func (m *MockKubectlClient) GetEksaOIDCConfig(ctx context.Context, oidcConfigName, kubeconfigFile, namespace string) (*v1alpha1.OIDCConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaOIDCConfig", ctx, oidcConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.OIDCConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaOIDCConfig indicates an expected call of GetEksaOIDCConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaOIDCConfig(ctx, oidcConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaOIDCConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaOIDCConfig), ctx, oidcConfigName, kubeconfigFile, namespace)
}
// GetEksaTinkerbellDatacenterConfig mocks base method.
func (m *MockKubectlClient) GetEksaTinkerbellDatacenterConfig(ctx context.Context, tinkerbellDatacenterConfigName, kubeconfigFile, namespace string) (*v1alpha1.TinkerbellDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaTinkerbellDatacenterConfig", ctx, tinkerbellDatacenterConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.TinkerbellDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaTinkerbellDatacenterConfig indicates an expected call of GetEksaTinkerbellDatacenterConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaTinkerbellDatacenterConfig(ctx, tinkerbellDatacenterConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaTinkerbellDatacenterConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaTinkerbellDatacenterConfig), ctx, tinkerbellDatacenterConfigName, kubeconfigFile, namespace)
}
// GetEksaTinkerbellMachineConfig mocks base method.
func (m *MockKubectlClient) GetEksaTinkerbellMachineConfig(ctx context.Context, tinkerbellMachineConfigName, kubeconfigFile, namespace string) (*v1alpha1.TinkerbellMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaTinkerbellMachineConfig", ctx, tinkerbellMachineConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.TinkerbellMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaTinkerbellMachineConfig indicates an expected call of GetEksaTinkerbellMachineConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaTinkerbellMachineConfig(ctx, tinkerbellMachineConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaTinkerbellMachineConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaTinkerbellMachineConfig), ctx, tinkerbellMachineConfigName, kubeconfigFile, namespace)
}
// GetEksaVSphereDatacenterConfig mocks base method.
func (m *MockKubectlClient) GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName, kubeconfigFile, namespace string) (*v1alpha1.VSphereDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaVSphereDatacenterConfig", ctx, vsphereDatacenterConfigName, kubeconfigFile, namespace)
ret0, _ := ret[0].(*v1alpha1.VSphereDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaVSphereDatacenterConfig indicates an expected call of GetEksaVSphereDatacenterConfig.
func (mr *MockKubectlClientMockRecorder) GetEksaVSphereDatacenterConfig(ctx, vsphereDatacenterConfigName, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereDatacenterConfig", reflect.TypeOf((*MockKubectlClient)(nil).GetEksaVSphereDatacenterConfig), ctx, vsphereDatacenterConfigName, kubeconfigFile, namespace)
}
// GetObject mocks base method.
func (m *MockKubectlClient) GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetObject", ctx, resourceType, name, namespace, kubeconfig, obj)
ret0, _ := ret[0].(error)
return ret0
}
// GetObject indicates an expected call of GetObject.
func (mr *MockKubectlClientMockRecorder) GetObject(ctx, resourceType, name, namespace, kubeconfig, obj interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockKubectlClient)(nil).GetObject), ctx, resourceType, name, namespace, kubeconfig, obj)
}
// List mocks base method.
func (m *MockKubectlClient) List(ctx context.Context, kubeconfig string, list kubernetes.ObjectList) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "List", ctx, kubeconfig, list)
ret0, _ := ret[0].(error)
return ret0
}
// List indicates an expected call of List.
func (mr *MockKubectlClientMockRecorder) List(ctx, kubeconfig, list interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubectlClient)(nil).List), ctx, kubeconfig, list)
}
// SearchIdentityProviderConfig mocks base method.
func (m *MockKubectlClient) SearchIdentityProviderConfig(ctx context.Context, ipName, kind, kubeconfigFile, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchIdentityProviderConfig", ctx, ipName, kind, kubeconfigFile, namespace)
ret0, _ := ret[0].([]*v1alpha1.VSphereDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchIdentityProviderConfig indicates an expected call of SearchIdentityProviderConfig.
func (mr *MockKubectlClientMockRecorder) SearchIdentityProviderConfig(ctx, ipName, kind, kubeconfigFile, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchIdentityProviderConfig", reflect.TypeOf((*MockKubectlClient)(nil).SearchIdentityProviderConfig), ctx, ipName, kind, kubeconfigFile, namespace)
}
// ValidateClustersCRD mocks base method.
func (m *MockKubectlClient) ValidateClustersCRD(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateClustersCRD", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateClustersCRD indicates an expected call of ValidateClustersCRD.
func (mr *MockKubectlClientMockRecorder) ValidateClustersCRD(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateClustersCRD", reflect.TypeOf((*MockKubectlClient)(nil).ValidateClustersCRD), ctx, cluster)
}
// ValidateControlPlaneNodes mocks base method.
func (m *MockKubectlClient) ValidateControlPlaneNodes(ctx context.Context, cluster *types.Cluster, clusterName string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateControlPlaneNodes", ctx, cluster, clusterName)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateControlPlaneNodes indicates an expected call of ValidateControlPlaneNodes.
func (mr *MockKubectlClientMockRecorder) ValidateControlPlaneNodes(ctx, cluster, clusterName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneNodes", reflect.TypeOf((*MockKubectlClient)(nil).ValidateControlPlaneNodes), ctx, cluster, clusterName)
}
// ValidateEKSAClustersCRD mocks base method.
func (m *MockKubectlClient) ValidateEKSAClustersCRD(ctx context.Context, cluster *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateEKSAClustersCRD", ctx, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateEKSAClustersCRD indicates an expected call of ValidateEKSAClustersCRD.
func (mr *MockKubectlClientMockRecorder) ValidateEKSAClustersCRD(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateEKSAClustersCRD", reflect.TypeOf((*MockKubectlClient)(nil).ValidateEKSAClustersCRD), ctx, cluster)
}
// ValidateNodes mocks base method.
func (m *MockKubectlClient) ValidateNodes(ctx context.Context, kubeconfig string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateNodes", ctx, kubeconfig)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateNodes indicates an expected call of ValidateNodes.
func (mr *MockKubectlClientMockRecorder) ValidateNodes(ctx, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNodes", reflect.TypeOf((*MockKubectlClient)(nil).ValidateNodes), ctx, kubeconfig)
}
// ValidateWorkerNodes mocks base method.
func (m *MockKubectlClient) ValidateWorkerNodes(ctx context.Context, clusterName, kubeconfig string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateWorkerNodes", ctx, clusterName, kubeconfig)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateWorkerNodes indicates an expected call of ValidateWorkerNodes.
func (mr *MockKubectlClientMockRecorder) ValidateWorkerNodes(ctx, clusterName, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateWorkerNodes", reflect.TypeOf((*MockKubectlClient)(nil).ValidateWorkerNodes), ctx, clusterName, kubeconfig)
}
// Version mocks base method.
func (m *MockKubectlClient) Version(ctx context.Context, cluster *types.Cluster) (*executables.VersionResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Version", ctx, cluster)
ret0, _ := ret[0].(*executables.VersionResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Version indicates an expected call of Version.
func (mr *MockKubectlClientMockRecorder) Version(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockKubectlClient)(nil).Version), ctx, cluster)
}
| 320 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/validations/tls.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockTlsValidator is a mock of TlsValidator interface.
type MockTlsValidator struct {
ctrl *gomock.Controller
recorder *MockTlsValidatorMockRecorder
}
// MockTlsValidatorMockRecorder is the mock recorder for MockTlsValidator.
type MockTlsValidatorMockRecorder struct {
mock *MockTlsValidator
}
// NewMockTlsValidator creates a new mock instance.
func NewMockTlsValidator(ctrl *gomock.Controller) *MockTlsValidator {
mock := &MockTlsValidator{ctrl: ctrl}
mock.recorder = &MockTlsValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockTlsValidator) EXPECT() *MockTlsValidatorMockRecorder {
return m.recorder
}
// IsSignedByUnknownAuthority mocks base method.
func (m *MockTlsValidator) IsSignedByUnknownAuthority(host, port string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsSignedByUnknownAuthority", host, port)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// IsSignedByUnknownAuthority indicates an expected call of IsSignedByUnknownAuthority.
func (mr *MockTlsValidatorMockRecorder) IsSignedByUnknownAuthority(host, port interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSignedByUnknownAuthority", reflect.TypeOf((*MockTlsValidator)(nil).IsSignedByUnknownAuthority), host, port)
}
// ValidateCert mocks base method.
func (m *MockTlsValidator) ValidateCert(host, port, caCertContent string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateCert", host, port, caCertContent)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateCert indicates an expected call of ValidateCert.
func (mr *MockTlsValidatorMockRecorder) ValidateCert(host, port, caCertContent interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCert", reflect.TypeOf((*MockTlsValidator)(nil).ValidateCert), host, port, caCertContent)
}
| 64 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
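// ValidateClusterObjectExists validates that a CAPI cluster object with the given
// cluster's name exists on that cluster.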
func ValidateClusterObjectExists(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster) error {
c, err := k.GetClusters(ctx, cluster)
if err != nil {
return err
}
if len(c) == 0 {
return fmt.Errorf("no CAPI cluster objects present on workload cluster %s", cluster.Name)
}
for _, capiCluster := range c {
if capiCluster.Metadata.Name == cluster.Name {
return nil
}
}
return fmt.Errorf("couldn't find CAPI cluster object for cluster with name %s", cluster.Name)
}
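// Illustrative upgrade-preflight wiring (assumed caller context): the check
// runs before any upgrade mutation, so a missing CAPI object aborts early.
//
//	if err := ValidateClusterObjectExists(ctx, kubectlClient, workloadCluster); err != nil {
//		return fmt.Errorf("refusing to upgrade: %v", err)
//	}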
| 26 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"bytes"
"errors"
"fmt"
"reflect"
"testing"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
)
const testclustername string = "testcluster"
type UnAuthKubectlClient struct {
*executables.Kubectl
*kubernetes.UnAuthClient
}
func TestValidateClusterPresent(t *testing.T) {
tests := []struct {
name string
wantErr error
upgradeVersion v1alpha1.KubernetesVersion
getClusterResponse string
}{
{
name: "FailureNoClusters",
wantErr: errors.New("no CAPI cluster objects present on workload cluster testcluster"),
getClusterResponse: "testdata/empty_get_cluster_response.json",
},
{
name: "FailureClusterNotPresent",
wantErr: errors.New("couldn't find CAPI cluster object for cluster with name testcluster"),
getClusterResponse: "testdata/no_target_cluster_response.json",
},
{
name: "SuccessClusterPresent",
wantErr: nil,
getClusterResponse: "testdata/target_cluster_response.json",
},
}
k, ctx, cluster, e := validations.NewKubectl(t)
uk := kubernetes.NewUnAuthClient(k)
cluster.Name = testclustername
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
fileContent := test.ReadFile(t, tc.getClusterResponse)
e.EXPECT().Execute(ctx, []string{"get", capiClustersResourceType, "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}).Return(*bytes.NewBufferString(fileContent), nil)
err := upgradevalidations.ValidateClusterObjectExists(ctx, UnAuthKubectlClient{k, uk}, cluster)
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
var capiClustersResourceType = fmt.Sprintf("clusters.%s", clusterv1.GroupVersion.Group)
| 69 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
eksaControllerDeploymentName = "eksa-controller-manager"
)
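// ValidateEksaSystemComponents validates that the EKS-A controller deployment in the
// eksa-system namespace exists and that all of its replicas are ready.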
func ValidateEksaSystemComponents(ctx context.Context, k *executables.Kubectl, cluster *types.Cluster) error {
deployments, err := k.GetDeployments(ctx, executables.WithCluster(cluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return fmt.Errorf("getting deployments in namespace %s: %v", constants.EksaSystemNamespace, err)
}
for _, d := range deployments {
if d.Name == eksaControllerDeploymentName {
ready := d.Status.ReadyReplicas
actual := d.Status.Replicas
if actual == 0 {
return fmt.Errorf("EKS-A controller deployment %s in namespace %s is scaled to 0 replicas; should be at least one replcias", eksaControllerDeploymentName, constants.EksaSystemNamespace)
}
if ready != actual {
return fmt.Errorf("EKS-A controller deployment %s replicas in namespace %s are not ready; ready=%d, want=%d", eksaControllerDeploymentName, constants.EksaSystemNamespace, ready, actual)
}
return nil
}
}
return fmt.Errorf("failed to find EKS-A controller deployment %s in namespace %s", eksaControllerDeploymentName, constants.EksaSystemNamespace)
}
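// The readiness invariant enforced above, as a one-line sketch (the status
// fields follow the Kubernetes Deployment API; everything else is illustrative):
//
//	healthy := d.Status.Replicas > 0 && d.Status.ReadyReplicas == d.Status.Replicas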
| 36 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"bytes"
"errors"
"reflect"
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
)
func TestValidateEksaControllerReady(t *testing.T) {
tests := []struct {
name string
wantErr error
upgradeVersion v1alpha1.KubernetesVersion
getDeploymentsResponse string
}{
{
name: "FailureNoDeployment",
wantErr: errors.New("failed to find EKS-A controller deployment eksa-controller-manager in namespace eksa-system"),
getDeploymentsResponse: "testdata/empty_get_deployments_response.json",
},
{
name: "FailureReplicasNotReady",
wantErr: errors.New("EKS-A controller deployment eksa-controller-manager replicas in namespace eksa-system are not ready; ready=0, want=1"),
getDeploymentsResponse: "testdata/eksa_controller_deployment_response_no_ready_replicas.json",
},
{
name: "FailureZeroReplicas",
wantErr:                errors.New("EKS-A controller deployment eksa-controller-manager in namespace eksa-system is scaled to 0 replicas; there should be at least one replica"),
getDeploymentsResponse: "testdata/eksa_controller_deployment_response_no_replicas.json",
},
{
name: "SuccessReplicasReady",
wantErr: nil,
getDeploymentsResponse: "testdata/eksa_controller_deployment_response.json",
},
}
k, ctx, cluster, e := validations.NewKubectl(t)
cluster.Name = testclustername
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
fileContent := test.ReadFile(t, tc.getDeploymentsResponse)
e.EXPECT().Execute(ctx, []string{"get", "deployments", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}).Return(*bytes.NewBufferString(fileContent), nil)
err := upgradevalidations.ValidateEksaSystemComponents(ctx, k, cluster)
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
| 58 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"context"
"errors"
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
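// ValidateImmutableFields compares the desired cluster spec against the cluster object
// currently stored in the cluster and errors if any immutable field has changed, then
// delegates provider-specific checks to the provider.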
func ValidateImmutableFields(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, spec *cluster.Spec, provider providers.Provider) error {
prevSpec, err := k.GetEksaCluster(ctx, cluster, spec.Cluster.Name)
if err != nil {
return err
}
if prevSpec.Name != spec.Cluster.Name {
return fmt.Errorf("cluster name is immutable. previous name %s, new name %s", prevSpec.Name, spec.Cluster.Name)
}
if prevSpec.Namespace != spec.Cluster.Namespace {
if !(prevSpec.Namespace == "default" && spec.Cluster.Namespace == "") {
return fmt.Errorf("cluster namespace is immutable")
}
}
oSpec := prevSpec.Spec
nSpec := spec.Cluster.Spec
if !nSpec.DatacenterRef.Equal(&oSpec.DatacenterRef) {
return fmt.Errorf("spec.dataCenterRef.name is immutable")
}
if err := ValidateGitOpsImmutableFields(ctx, k, cluster, spec, prevSpec); err != nil {
return err
}
if !nSpec.ControlPlaneConfiguration.Endpoint.Equal(oSpec.ControlPlaneConfiguration.Endpoint, nSpec.DatacenterRef.Kind) {
return fmt.Errorf("spec.controlPlaneConfiguration.endpoint is immutable")
}
/* compare all clusterNetwork fields individually, since we do allow updating fields for configuring plugins such as CiliumConfig through the cli */
if !nSpec.ClusterNetwork.Pods.Equal(&oSpec.ClusterNetwork.Pods) {
return fmt.Errorf("spec.clusterNetwork.Pods is immutable")
}
if !nSpec.ClusterNetwork.Services.Equal(&oSpec.ClusterNetwork.Services) {
return fmt.Errorf("spec.clusterNetwork.Services is immutable")
}
if !nSpec.ClusterNetwork.DNS.Equal(&oSpec.ClusterNetwork.DNS) {
return fmt.Errorf("spec.clusterNetwork.DNS is immutable")
}
if !v1alpha1.CNIPluginSame(nSpec.ClusterNetwork, oSpec.ClusterNetwork) {
return fmt.Errorf("spec.clusterNetwork.CNI/CNIConfig is immutable")
}
// We don't want users to be able to toggle off SkipUpgrade until we've understood the
// implications so we are temporarily disallowing it.
oCNI := prevSpec.Spec.ClusterNetwork.CNIConfig
nCNI := spec.Cluster.Spec.ClusterNetwork.CNIConfig
if oCNI != nil && oCNI.Cilium != nil && !oCNI.Cilium.IsManaged() && nCNI != nil && nCNI.Cilium != nil && nCNI.Cilium.IsManaged() {
return fmt.Errorf("spec.clusterNetwork.cniConfig.cilium.skipUpgrade cannot be toggled off")
}
if !nSpec.ProxyConfiguration.Equal(oSpec.ProxyConfiguration) {
return fmt.Errorf("spec.proxyConfiguration is immutable")
}
oldETCD := oSpec.ExternalEtcdConfiguration
newETCD := nSpec.ExternalEtcdConfiguration
if oldETCD != nil && newETCD != nil {
if oldETCD.Count != newETCD.Count {
return errors.New("spec.externalEtcdConfiguration.count is immutable")
}
} else if oldETCD != newETCD {
return errors.New("adding or removing external etcd during upgrade is not supported")
}
oldAWSIamConfigRef := &v1alpha1.Ref{}
for _, oIdentityProvider := range oSpec.IdentityProviderRefs {
switch oIdentityProvider.Kind {
case v1alpha1.AWSIamConfigKind:
oIdentityProvider := oIdentityProvider // new variable scoped to the for loop Ref: https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
oldAWSIamConfigRef = &oIdentityProvider
}
}
for _, nIdentityProvider := range nSpec.IdentityProviderRefs {
switch nIdentityProvider.Kind {
case v1alpha1.AWSIamConfigKind:
newAWSIamConfigRef := &nIdentityProvider
prevAwsIam, err := k.GetEksaAWSIamConfig(ctx, nIdentityProvider.Name, cluster.KubeconfigFile, spec.Cluster.Namespace)
if err != nil {
return err
}
if !prevAwsIam.Spec.Equal(&spec.AWSIamConfig.Spec) || !oldAWSIamConfigRef.Equal(newAWSIamConfigRef) {
return fmt.Errorf("aws iam identity provider is immutable")
}
}
}
if spec.Cluster.IsSelfManaged() != prevSpec.IsSelfManaged() {
return fmt.Errorf("management flag is immutable")
}
if oSpec.ManagementCluster.Name != nSpec.ManagementCluster.Name {
return fmt.Errorf("management cluster name is immutable")
}
return provider.ValidateNewSpec(ctx, cluster, spec)
}
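// Note on the external etcd rule above: when both pointers are non-nil only the
// count is compared; otherwise `oldETCD != newETCD` is true exactly when one of
// them is nil, i.e. the etcd topology changed between stacked and external,
// which is rejected.

// ValidateGitOpsImmutableFields errors if any immutable gitOpsConfig or fluxConfig
// field changed relative to the old cluster object.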
func ValidateGitOpsImmutableFields(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster, clusterSpec *cluster.Spec, oldCluster *v1alpha1.Cluster) error {
if oldCluster.Spec.GitOpsRef == nil {
return nil
}
if !clusterSpec.Cluster.Spec.GitOpsRef.Equal(oldCluster.Spec.GitOpsRef) {
return errors.New("once cluster.spec.gitOpsRef is set, it is immutable")
}
switch clusterSpec.Cluster.Spec.GitOpsRef.Kind {
case v1alpha1.GitOpsConfigKind:
prevGitOps, err := k.GetEksaGitOpsConfig(ctx, clusterSpec.Cluster.Spec.GitOpsRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if prevGitOps.Spec.Flux.Github.Owner != clusterSpec.GitOpsConfig.Spec.Flux.Github.Owner {
return errors.New("gitOps spec.flux.github.owner is immutable")
}
if prevGitOps.Spec.Flux.Github.Repository != clusterSpec.GitOpsConfig.Spec.Flux.Github.Repository {
return errors.New("gitOps spec.flux.github.repository is immutable")
}
if prevGitOps.Spec.Flux.Github.Personal != clusterSpec.GitOpsConfig.Spec.Flux.Github.Personal {
return errors.New("gitOps spec.flux.github.personal is immutable")
}
if prevGitOps.Spec.Flux.Github.FluxSystemNamespace != clusterSpec.GitOpsConfig.Spec.Flux.Github.FluxSystemNamespace {
return errors.New("gitOps spec.flux.github.fluxSystemNamespace is immutable")
}
if prevGitOps.Spec.Flux.Github.Branch != clusterSpec.GitOpsConfig.Spec.Flux.Github.Branch {
return errors.New("gitOps spec.flux.github.branch is immutable")
}
if prevGitOps.Spec.Flux.Github.ClusterConfigPath != clusterSpec.GitOpsConfig.Spec.Flux.Github.ClusterConfigPath {
return errors.New("gitOps spec.flux.github.clusterConfigPath is immutable")
}
case v1alpha1.FluxConfigKind:
prevGitOps, err := k.GetEksaFluxConfig(ctx, clusterSpec.Cluster.Spec.GitOpsRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if prevGitOps.Spec.Git != nil {
if prevGitOps.Spec.Git.RepositoryUrl != clusterSpec.FluxConfig.Spec.Git.RepositoryUrl {
return errors.New("fluxConfig spec.fluxConfig.spec.git.repositoryUrl is immutable")
}
if prevGitOps.Spec.Git.SshKeyAlgorithm != clusterSpec.FluxConfig.Spec.Git.SshKeyAlgorithm {
return errors.New("fluxConfig spec.fluxConfig.spec.git.sshKeyAlgorithm is immutable")
}
}
if prevGitOps.Spec.Github != nil {
if prevGitOps.Spec.Github.Repository != clusterSpec.FluxConfig.Spec.Github.Repository {
return errors.New("fluxConfig spec.github.repository is immutable")
}
if prevGitOps.Spec.Github.Owner != clusterSpec.FluxConfig.Spec.Github.Owner {
return errors.New("fluxConfig spec.github.owner is immutable")
}
if prevGitOps.Spec.Github.Personal != clusterSpec.FluxConfig.Spec.Github.Personal {
return errors.New("fluxConfig spec.github.personal is immutable")
}
}
if prevGitOps.Spec.Branch != clusterSpec.FluxConfig.Spec.Branch {
return errors.New("fluxConfig spec.branch is immutable")
}
if prevGitOps.Spec.ClusterConfigPath != clusterSpec.FluxConfig.Spec.ClusterConfigPath {
return errors.New("fluxConfig spec.clusterConfigPath is immutable")
}
if prevGitOps.Spec.SystemNamespace != clusterSpec.FluxConfig.Spec.SystemNamespace {
return errors.New("fluxConfig spec.systemNamespace is immutable")
}
}
return nil
}
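// Illustrative usage (assumed caller, mirroring the tests below): the old cluster
// object comes from the management cluster, the desired spec from the upgrade request.
//
//	if err := ValidateGitOpsImmutableFields(ctx, k, cluster, newSpec, oldCluster); err != nil {
//		return fmt.Errorf("gitops fields changed: %v", err)
//	}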
| 195 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
pmock "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestValidateGitOpsImmutableFieldsRef(t *testing.T) {
tests := []struct {
name string
oldRef, newRef *v1alpha1.Ref
wantErr string
}{
{
name: "old gitRef nil, new gitRef not nil",
oldRef: nil,
newRef: &v1alpha1.Ref{
Kind: "GitOpsConfig",
Name: "gitops-new",
},
wantErr: "",
},
{
name: "old gitRef not nil, new gitRef nil",
oldRef: &v1alpha1.Ref{
Kind: "GitOpsConfig",
Name: "gitops-old",
},
newRef: nil,
wantErr: "once cluster.spec.gitOpsRef is set, it is immutable",
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
g := NewWithT(t)
ctx := context.Background()
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.GitOpsRef = tc.newRef
})
cluster := &types.Cluster{
KubeconfigFile: "kubeconfig",
}
oldCluster := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
GitOpsRef: tc.oldRef,
},
}
err := upgradevalidations.ValidateGitOpsImmutableFields(ctx, nil, cluster, clusterSpec, oldCluster)
if tc.wantErr == "" {
g.Expect(err).To(Succeed())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tc.wantErr)))
}
})
}
}
type gitOpsTest struct {
*WithT
ctx context.Context
k *mocks.MockKubectlClient
c *types.Cluster
o *v1alpha1.Cluster
s *cluster.Spec
}
func newGitClientTest(t *testing.T) *gitOpsTest {
ctrl := gomock.NewController(t)
return &gitOpsTest{
WithT: NewWithT(t),
ctx: context.Background(),
k: mocks.NewMockKubectlClient(ctrl),
c: &types.Cluster{
KubeconfigFile: "kubeconfig",
},
o: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
GitOpsRef: &v1alpha1.Ref{
Name: "test",
Kind: "GitOpsConfig",
},
},
},
s: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Name: "test",
Kind: "GitOpsConfig",
}
}),
}
}
func TestValidateGitOpsImmutableFieldsGetEksaGitOpsConfigError(t *testing.T) {
g := newGitClientTest(t)
g.k.EXPECT().GetEksaGitOpsConfig(g.ctx, g.s.Cluster.Spec.GitOpsRef.Name, "kubeconfig", "").Return(nil, errors.New("error in get gitops config"))
g.Expect(upgradevalidations.ValidateGitOpsImmutableFields(g.ctx, g.k, g.c, g.s, g.o)).To(MatchError(ContainSubstring("error in get gitops config")))
}
func TestValidateGitOpsImmutableFieldsGetEksaFluxConfigError(t *testing.T) {
g := newGitClientTest(t)
g.o.Spec.GitOpsRef.Kind = "FluxConfig"
g.s.Cluster.Spec.GitOpsRef.Kind = "FluxConfig"
g.k.EXPECT().GetEksaFluxConfig(g.ctx, g.s.Cluster.Spec.GitOpsRef.Name, "kubeconfig", "").Return(nil, errors.New("error in get flux config"))
g.Expect(upgradevalidations.ValidateGitOpsImmutableFields(g.ctx, g.k, g.c, g.s, g.o)).To(MatchError(ContainSubstring("error in get flux config")))
}
func TestValidateGitOpsImmutableFieldsFluxConfig(t *testing.T) {
tests := []struct {
name string
new, old *v1alpha1.FluxConfig
wantErr string
}{
{
name: "github repo diff",
new: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Repository: "a",
},
},
},
old: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Repository: "b",
},
},
},
wantErr: "fluxConfig spec.github.repository is immutable",
},
{
name: "github owner diff",
new: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Owner: "a",
},
},
},
old: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Owner: "b",
},
},
},
wantErr: "fluxConfig spec.github.owner is immutable",
},
{
name: "github personal diff",
new: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Personal: true,
},
},
},
old: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Personal: false,
},
},
},
wantErr: "fluxConfig spec.github.personal is immutable",
},
{
name: "branch diff",
new: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Branch: "a",
},
},
old: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Branch: "b",
},
},
wantErr: "fluxConfig spec.branch is immutable",
},
{
name: "clusterConfigPath diff",
new: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: "a",
},
},
old: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: "b",
},
},
wantErr: "fluxConfig spec.clusterConfigPath is immutable",
},
{
name: "systemNamespace diff",
new: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "a",
},
},
old: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "b",
},
},
wantErr: "fluxConfig spec.systemNamespace is immutable",
},
}
for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
g := newGitClientTest(t)
g.o.Spec.GitOpsRef.Kind = "FluxConfig"
g.s.Cluster.Spec.GitOpsRef.Kind = "FluxConfig"
g.s.FluxConfig = tc.new
g.k.EXPECT().GetEksaFluxConfig(g.ctx, g.s.Cluster.Spec.GitOpsRef.Name, "kubeconfig", "").Return(tc.old, nil)
err := upgradevalidations.ValidateGitOpsImmutableFields(g.ctx, g.k, g.c, g.s, g.o)
if tc.wantErr == "" {
g.Expect(err).To(Succeed())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tc.wantErr)))
}
})
}
}
func TestValidateImmutableFields(t *testing.T) {
tests := []struct {
Name string
ConfigureCurrent func(current *v1alpha1.Cluster)
ConfigureDesired func(desired *v1alpha1.Cluster)
ExpectedError string
}{
{
Name: "Toggle Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade on",
ConfigureCurrent: func(current *v1alpha1.Cluster) {
current.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
}
},
ConfigureDesired: func(desired *v1alpha1.Cluster) {
desired.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
}
},
},
{
Name: "Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade unset",
ConfigureCurrent: func(current *v1alpha1.Cluster) {
current.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{},
}
},
ConfigureDesired: func(desired *v1alpha1.Cluster) {
desired.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{},
}
},
},
{
Name: "Toggle Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade off",
ConfigureCurrent: func(current *v1alpha1.Cluster) {
current.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
}
},
ConfigureDesired: func(desired *v1alpha1.Cluster) {
desired.Spec.ClusterNetwork.CNIConfig = &v1alpha1.CNIConfig{
Cilium: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
}
},
ExpectedError: "spec.clusterNetwork.cniConfig.cilium.skipUpgrade cannot be toggled off",
},
}
clstr := &types.Cluster{}
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
current := &cluster.Spec{
Config: &cluster.Config{
Cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{{}},
},
},
},
VersionsBundle: &cluster.VersionsBundle{
VersionsBundle: &releasev1alpha1.VersionsBundle{},
KubeDistro: &cluster.KubeDistro{},
},
Bundles: &releasev1alpha1.Bundles{},
}
desired := current.DeepCopy()
tc.ConfigureCurrent(current.Config.Cluster)
tc.ConfigureDesired(desired.Config.Cluster)
client := mocks.NewMockKubectlClient(ctrl)
client.EXPECT().
GetEksaCluster(gomock.Any(), clstr, current.Cluster.Name).
Return(current.Cluster, nil)
provider := pmock.NewMockProvider(ctrl)
// The algorithm calls out to the provider to validate the new spec only if it finds
// no errors in the generic validation first.
if tc.ExpectedError == "" {
provider.EXPECT().
ValidateNewSpec(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
}
err := upgradevalidations.ValidateImmutableFields(
context.Background(),
client,
clstr,
desired,
provider,
)
if tc.ExpectedError == "" {
g.Expect(err).To(Succeed())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tc.ExpectedError)))
}
})
}
}
| 363 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"context"
"fmt"
"github.com/pkg/errors"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
// ValidatePodDisruptionBudgets returns an error if any PodDisruptionBudgets (PDBs) are detected on the cluster.
func ValidatePodDisruptionBudgets(ctx context.Context, k validations.KubectlClient, cluster *types.Cluster) error {
podDisruptionBudgets := &policy.PodDisruptionBudgetList{}
if err := k.List(ctx, cluster.KubeconfigFile, podDisruptionBudgets); err != nil {
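		// A NotFound error means the PodDisruptionBudget API is not served by
		// the cluster, in which case there are no PDBs to block the upgrade.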
if !apierrors.IsNotFound(err) {
return errors.Wrap(err, "listing cluster pod disruption budgets for upgrade")
}
}
if len(podDisruptionBudgets.Items) != 0 {
return fmt.Errorf("one or more pod disruption budgets were detected on the cluster. Use the --skip-validations=%s flag if you wish to skip the validations for pod disruption budgets and proceed with the upgrade operation", PDB)
}
return nil
}
| 30 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"testing"
"github.com/golang/mock/gomock"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
)
func TestValidatePodDisruptionBudgets(t *testing.T) {
type args struct {
ctx context.Context
k validations.KubectlClient
cluster *types.Cluster
pdbList *policy.PodDisruptionBudgetList
}
mockCtrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(mockCtrl)
c := types.Cluster{
KubeconfigFile: "test.kubeconfig",
}
tests := []struct {
name string
args args
wantErr error
}{
{
name: "PDBs exist on cluster",
args: args{
ctx: context.Background(),
k: k,
cluster: &c,
pdbList: &policy.PodDisruptionBudgetList{
Items: []policy.PodDisruptionBudget{
{
Spec: policy.PodDisruptionBudgetSpec{
MinAvailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 0,
},
},
},
},
},
},
wantErr: fmt.Errorf("one or more pod disruption budgets were detected on the cluster. Use the --skip-validations=%s flag if you wish to skip the validations for pod disruption budgets and proceed with the upgrade operation", upgradevalidations.PDB),
},
{
name: "PDBs don't exist on cluster",
args: args{
ctx: context.Background(),
k: k,
cluster: &c,
pdbList: &policy.PodDisruptionBudgetList{},
},
wantErr: nil,
},
}
for _, tt := range tests {
podDisruptionBudgets := &policy.PodDisruptionBudgetList{}
k.EXPECT().List(tt.args.ctx, tt.args.cluster.KubeconfigFile, podDisruptionBudgets).DoAndReturn(func(_ context.Context, _ string, objs kubernetes.ObjectList) error {
tt.args.pdbList.DeepCopyInto(objs.(*policy.PodDisruptionBudgetList))
return nil
})
t.Run(tt.name, func(t *testing.T) {
if err := upgradevalidations.ValidatePodDisruptionBudgets(tt.args.ctx, tt.args.k, tt.args.cluster); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("ValidatePodDisruptionBudgets() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestValidatePodDisruptionBudgetsFailure(t *testing.T) {
mockCtrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(mockCtrl)
c := types.Cluster{
KubeconfigFile: "test.kubeconfig",
}
ctx := context.Background()
pdbList := &policy.PodDisruptionBudgetList{}
k.EXPECT().List(ctx, c.KubeconfigFile, pdbList).Return(errors.New("listing cluster pod disruption budgets for upgrade"))
wantErr := errors.New("listing cluster pod disruption budgets for upgrade")
err := upgradevalidations.ValidatePodDisruptionBudgets(ctx, k, &c)
	if err == nil || !strings.Contains(err.Error(), wantErr.Error()) {
t.Errorf("ValidatePodDisruptionBudgets() error = %v, wantErr %v", err, wantErr)
}
}
| 104 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"context"
"fmt"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
// PreflightValidations returns the validations required before upgrading a cluster.
func (u *UpgradeValidations) PreflightValidations(ctx context.Context) []validations.Validation {
k := u.Opts.Kubectl
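	// Workload cluster objects are stored on the management cluster, so the
	// target cluster is addressed through the management kubeconfig.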
targetCluster := &types.Cluster{
Name: u.Opts.WorkloadCluster.Name,
KubeconfigFile: u.Opts.ManagementCluster.KubeconfigFile,
}
upgradeValidations := []validations.Validation{
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate OS is compatible with registry mirror configuration",
Remediation: "please use a valid OS for your registry mirror configuration",
Err: validations.ValidateOSForRegistryMirror(u.Opts.Spec, u.Opts.Provider),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate certificate for registry mirror",
Remediation: fmt.Sprintf("provide a valid certificate for you registry endpoint using %s env var", anywherev1.RegistryMirrorCAKey),
Err: validations.ValidateCertForRegistryMirror(u.Opts.Spec, u.Opts.TLSValidator),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "control plane ready",
Remediation: fmt.Sprintf("ensure control plane nodes and pods for cluster %s are Ready", u.Opts.WorkloadCluster.Name),
Err: k.ValidateControlPlaneNodes(ctx, targetCluster, targetCluster.Name),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "worker nodes ready",
Remediation: fmt.Sprintf("ensure machine deployments for cluster %s are Ready", u.Opts.WorkloadCluster.Name),
Err: k.ValidateWorkerNodes(ctx, u.Opts.Spec.Cluster.Name, targetCluster.KubeconfigFile),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "nodes ready",
Remediation: fmt.Sprintf("check the Status of the control plane and worker nodes in cluster %s and verify they are Ready", u.Opts.WorkloadCluster.Name),
Err: k.ValidateNodes(ctx, u.Opts.WorkloadCluster.KubeconfigFile),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "cluster CRDs ready",
Remediation: "",
Err: k.ValidateClustersCRD(ctx, u.Opts.ManagementCluster),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "cluster object present on workload cluster",
Remediation: fmt.Sprintf("ensure that the CAPI cluster object %s representing cluster %s is present", clusterv1.GroupVersion, u.Opts.WorkloadCluster.Name),
Err: ValidateClusterObjectExists(ctx, k, u.Opts.ManagementCluster),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "upgrade cluster kubernetes version increment",
Remediation: "ensure that the cluster kubernetes version is incremented by one minor version exactly (e.g. 1.18 -> 1.19)",
Err: ValidateServerVersionSkew(ctx, u.Opts.Spec.Cluster, u.Opts.WorkloadCluster, u.Opts.ManagementCluster, k),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate authentication for git provider",
Remediation: fmt.Sprintf("ensure %s, %s env variable are set and valid", config.EksaGitPrivateKeyTokenEnv, config.EksaGitKnownHostsFileEnv),
Err: validations.ValidateAuthenticationForGitProvider(u.Opts.Spec, u.Opts.CliConfig),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate immutable fields",
Remediation: "",
Err: ValidateImmutableFields(ctx, k, targetCluster, u.Opts.Spec, u.Opts.Provider),
}
},
}
if u.Opts.Spec.Cluster.IsManaged() {
upgradeValidations = append(
upgradeValidations,
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate management cluster bundle version compatibility",
Remediation: fmt.Sprintf("upgrade management cluster %s before upgrading workload cluster %s", u.Opts.Spec.Cluster.ManagedBy(), u.Opts.WorkloadCluster.Name),
Err: validations.ValidateManagementClusterBundlesVersion(ctx, k, u.Opts.ManagementCluster, u.Opts.Spec),
}
})
}
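	// Pod disruption budget checking is currently the only skippable
	// validation; append it unless the user opted out via --skip-validations.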
if !u.Opts.SkippedValidations[PDB] {
upgradeValidations = append(
upgradeValidations,
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate pod disruption budgets",
Remediation: "",
Err: ValidatePodDisruptionBudgets(ctx, k, u.Opts.WorkloadCluster),
}
})
}
return upgradeValidations
}
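// A typical call site, as exercised by the tests for this package (a sketch;
// New and ProcessValidationResults come from the surrounding validations
// packages):
//
//	uv := upgradevalidations.New(opts)
//	if err := validations.ProcessValidationResults(uv.PreflightValidations(ctx)); err != nil {
//		// abort the upgrade
//	}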
| 121 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"context"
"errors"
"fmt"
"reflect"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
filewritermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
mockproviders "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
tinkerbellmocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
stackmocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
kubeconfigFilePath = "./fakeKubeconfigFilePath"
)
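// goodClusterResponse is the CAPI cluster list returned for a healthy
// workload cluster: a single cluster whose name matches the cluster under
// test.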
var goodClusterResponse = []types.CAPICluster{{Metadata: types.Metadata{Name: testclustername}}}
func TestPreflightValidationsTinkerbell(t *testing.T) {
tests := []struct {
name string
clusterVersion string
upgradeVersion string
getClusterResponse []types.CAPICluster
cpResponse error
workerResponse error
nodeResponse error
crdResponse error
wantErr error
modifyFunc func(s *cluster.Spec)
modifyDatacenterFunc func(s *anywherev1.TinkerbellDatacenterConfig)
modifyMachineConfigFunc func(s *anywherev1.TinkerbellMachineConfig)
}{
{
name: "ValidationSucceeds",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
},
{
name: "ValidationFailsClusterDoesNotExist",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: []types.CAPICluster{{Metadata: types.Metadata{Name: "thisIsNotTheClusterYourLookingFor"}}},
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("couldn't find CAPI cluster object for cluster with name testcluster"),
},
{
name: "ValidationFailsNoClusters",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: []types.CAPICluster{},
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("no CAPI cluster objects present on workload cluster testcluster"),
},
{
name: "ValidationFailsCpNotReady",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: errors.New("control plane nodes are not ready"),
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("control plane nodes are not ready"),
},
{
name: "ValidationFailsWorkerNodesNotReady",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: errors.New("2 worker nodes are not ready"),
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("2 worker nodes are not ready"),
},
{
name: "ValidationFailsNodesNotReady",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: errors.New("node test-node is not ready, currently in Unknown state"),
crdResponse: nil,
wantErr: composeError("node test-node is not ready, currently in Unknown state"),
},
{
name: "ValidationFailsNoCrds",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: errors.New("error getting clusters crd: crd not found"),
wantErr: composeError("error getting clusters crd: crd not found"),
},
{
name: "ValidationFailsExplodingCluster",
clusterVersion: "v1.18.16-eks-1-18-4",
upgradeVersion: "1.20",
getClusterResponse: []types.CAPICluster{{Metadata: types.Metadata{Name: "thisIsNotTheClusterYourLookingFor"}}},
cpResponse: errors.New("control plane nodes are not ready"),
workerResponse: errors.New("2 worker nodes are not ready"),
nodeResponse: errors.New("node test-node is not ready, currently in Unknown state"),
crdResponse: errors.New("error getting clusters crd: crd not found"),
wantErr: explodingClusterError,
},
{
name: "ValidationControlPlaneImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.controlPlaneConfiguration.endpoint is immutable"),
modifyFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "2.3.4.5"
},
},
{
name: "ValidationClusterNetworkPodsImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.clusterNetwork.Pods is immutable"),
modifyFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Pods = anywherev1.Pods{}
},
},
{
name: "ValidationClusterNetworkServicesImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.clusterNetwork.Services is immutable"),
modifyFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Services = anywherev1.Services{}
},
},
{
name: "ValidationManagementImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("management flag is immutable"),
modifyFunc: func(s *cluster.Spec) {
s.Cluster.SetManagedBy(fmt.Sprintf("%s-1", s.Cluster.ManagedBy()))
},
},
{
name: "ValidationTinkerbellIPImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.TinkerbellIP is immutable. Previous value 4.5.6.7, New value 1.2.3.4"),
modifyDatacenterFunc: func(s *anywherev1.TinkerbellDatacenterConfig) {
s.Spec.TinkerbellIP = "4.5.6.7"
},
},
{
name: "ValidationOSImageURLImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.OSImageURL is immutable. Previous value http://old-os-image-url, New value http://os-image-url"),
modifyDatacenterFunc: func(s *anywherev1.TinkerbellDatacenterConfig) {
s.Spec.OSImageURL = "http://old-os-image-url"
},
},
{
name: "ValidationHookImageURLImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.HookImagesURLPath is immutable. Previous value http://old-hook-image-url, New value http://hook-image-url"),
modifyDatacenterFunc: func(s *anywherev1.TinkerbellDatacenterConfig) {
s.Spec.HookImagesURLPath = "http://old-hook-image-url"
},
},
{
name: "ValidationSSHUsernameImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.Users[0].Name is immutable. Previous value myOldSshUsername, New value mySshUsername"),
modifyMachineConfigFunc: func(s *anywherev1.TinkerbellMachineConfig) {
s.Spec.Users[0].Name = "myOldSshUsername"
},
},
{
name: "ValidationSSHAuthorizedKeysImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.Users[0].SshAuthorizedKeys[0] is immutable. Previous value myOldSshAuthorizedKeys, New value mySshAuthorizedKey"),
modifyMachineConfigFunc: func(s *anywherev1.TinkerbellMachineConfig) {
s.Spec.Users[0].SshAuthorizedKeys[0] = "myOldSshAuthorizedKeys"
},
},
{
name: "ValidationHardwareSelectorImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.HardwareSelector is immutable. Previous value map[type:cp1], New value map[type:cp]"),
modifyMachineConfigFunc: func(s *anywherev1.TinkerbellMachineConfig) {
s.Spec.HardwareSelector = map[string]string{
"type": "cp1",
}
},
},
}
defaultControlPlane := anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Name: "test-cp",
Kind: "TinkerbellMachineConfig",
},
}
defaultDatacenterSpec := anywherev1.TinkerbellDatacenterConfig{
Spec: anywherev1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "1.2.3.4",
OSImageURL: "http://os-image-url",
HookImagesURLPath: "http://hook-image-url",
},
Status: anywherev1.TinkerbellDatacenterConfigStatus{},
}
defaultTinkerbellMachineConfigSpec := anywherev1.TinkerbellMachineConfig{
Spec: anywherev1.TinkerbellMachineConfigSpec{
HardwareSelector: map[string]string{
"type": "cp",
},
OSFamily: "ubuntu",
Users: []anywherev1.UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.ControlPlaneConfiguration = defaultControlPlane
s.Cluster.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.TinkerbellDatacenterKind,
Name: "tinkerbell test",
}
s.Cluster.Spec.ClusterNetwork = anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
Services: anywherev1.Services{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
}
})
for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
workloadCluster := &types.Cluster{}
ctx := context.Background()
workloadCluster.KubeconfigFile = kubeconfigFilePath
workloadCluster.Name = testclustername
mockCtrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(mockCtrl)
kubectl := tinkerbellmocks.NewMockProviderKubectlClient(mockCtrl)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
tlsValidator := mocks.NewMockTlsValidator(mockCtrl)
provider := newProvider(defaultDatacenterSpec, givenTinkerbellMachineConfigs(t), clusterSpec.Cluster, writer, docker, helm, kubectl, false)
opts := &validations.Opts{
Kubectl: k,
Spec: clusterSpec,
WorkloadCluster: workloadCluster,
ManagementCluster: workloadCluster,
Provider: provider,
TLSValidator: tlsValidator,
}
clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.KubernetesVersion(tc.upgradeVersion)
existingClusterSpec := clusterSpec.DeepCopy()
existingClusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.KubernetesVersion(tc.clusterVersion)
existingProviderSpec := defaultDatacenterSpec.DeepCopy()
existingMachineConfigSpec := defaultTinkerbellMachineConfigSpec.DeepCopy()
if tc.modifyFunc != nil {
tc.modifyFunc(existingClusterSpec)
}
if tc.modifyDatacenterFunc != nil {
tc.modifyDatacenterFunc(existingProviderSpec)
}
if tc.modifyMachineConfigFunc != nil {
tc.modifyMachineConfigFunc(existingMachineConfigSpec)
}
kubectl.EXPECT().GetEksaCluster(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(existingClusterSpec.Cluster, nil).MaxTimes(1)
kubectl.EXPECT().GetEksaTinkerbellDatacenterConfig(ctx, clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), gomock.Any()).Return(existingProviderSpec, nil).MaxTimes(1)
kubectl.EXPECT().GetEksaTinkerbellMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, gomock.Any(), gomock.Any()).Return(existingMachineConfigSpec, nil).MaxTimes(1)
k.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
k.EXPECT().ValidateControlPlaneNodes(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(tc.cpResponse)
k.EXPECT().ValidateWorkerNodes(ctx, workloadCluster.Name, workloadCluster.KubeconfigFile).Return(tc.workerResponse)
k.EXPECT().ValidateNodes(ctx, kubeconfigFilePath).Return(tc.nodeResponse)
k.EXPECT().ValidateClustersCRD(ctx, workloadCluster).Return(tc.crdResponse)
k.EXPECT().GetClusters(ctx, workloadCluster).Return(tc.getClusterResponse, nil)
k.EXPECT().GetEksaCluster(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(existingClusterSpec.Cluster, nil).Times(2)
upgradeValidations := upgradevalidations.New(opts)
err := validations.ProcessValidationResults(upgradeValidations.PreflightValidations(ctx))
			if (err == nil) != (tc.wantErr == nil) || (err != nil && err.Error() != tc.wantErr.Error()) {
t.Errorf("%s want err=%v\n got err=%v\n", tc.name, tc.wantErr, err)
}
})
}
}
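// givenTinkerbellMachineConfigs loads the Tinkerbell machine config fixtures
// from testdata, failing the test if the file cannot be parsed.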
func givenTinkerbellMachineConfigs(t *testing.T) map[string]*anywherev1.TinkerbellMachineConfig {
machineConfigs, err := anywherev1.GetTinkerbellMachineConfigs("./testdata/tinkerbell_clusterconfig.yaml")
if err != nil {
t.Fatalf("unable to get machine configs from file: %v", err)
}
return machineConfigs
}
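// newProvider builds a Tinkerbell provider against static testdata hardware;
// it panics on a construction error because the fixtures are fixed.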
func newProvider(datacenterConfig anywherev1.TinkerbellDatacenterConfig, machineConfigs map[string]*anywherev1.TinkerbellMachineConfig, clusterConfig *anywherev1.Cluster, writer filewriter.FileWriter, docker stack.Docker, helm stack.Helm, kubectl tinkerbell.ProviderKubectlClient, forceCleanup bool) *tinkerbell.Provider {
hardwareFile := "./testdata/hardware.csv"
provider, err := tinkerbell.NewProvider(
&datacenterConfig,
machineConfigs,
clusterConfig,
hardwareFile,
writer,
docker,
helm,
kubectl,
"1.2.3.4",
test.FakeNow,
forceCleanup,
false,
)
if err != nil {
panic(err)
}
return provider
}
func TestPreflightValidationsVsphere(t *testing.T) {
tests := []struct {
name string
clusterVersion string
upgradeVersion string
getClusterResponse []types.CAPICluster
cpResponse error
workerResponse error
nodeResponse error
crdResponse error
wantErr error
modifyExistingSpecFunc func(s *cluster.Spec)
modifyDefaultSpecFunc func(s *cluster.Spec)
additionalKubectlMocks func(k *mocks.MockKubectlClient)
}{
{
name: "ValidationSucceeds",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
},
{
name: "ValidationFailsClusterDoesNotExist",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: []types.CAPICluster{{Metadata: types.Metadata{Name: "thisIsNotTheClusterYourLookingFor"}}},
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("couldn't find CAPI cluster object for cluster with name testcluster"),
},
{
name: "ValidationFailsNoClusters",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: []types.CAPICluster{},
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("no CAPI cluster objects present on workload cluster testcluster"),
},
{
name: "ValidationFailsCpNotReady",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: errors.New("control plane nodes are not ready"),
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("control plane nodes are not ready"),
},
{
name: "ValidationFailsWorkerNodesNotReady",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: errors.New("2 worker nodes are not ready"),
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("2 worker nodes are not ready"),
},
{
name: "ValidationFailsNodesNotReady",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: errors.New("node test-node is not ready, currently in Unknown state"),
crdResponse: nil,
wantErr: composeError("node test-node is not ready, currently in Unknown state"),
},
{
name: "ValidationFailsNoCrds",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: errors.New("error getting clusters crd: crd not found"),
wantErr: composeError("error getting clusters crd: crd not found"),
},
{
name: "ValidationFailsExplodingCluster",
clusterVersion: "v1.18.16-eks-1-18-4",
upgradeVersion: "1.20",
getClusterResponse: []types.CAPICluster{{Metadata: types.Metadata{Name: "thisIsNotTheClusterYourLookingFor"}}},
cpResponse: errors.New("control plane nodes are not ready"),
workerResponse: errors.New("2 worker nodes are not ready"),
nodeResponse: errors.New("node test-node is not ready, currently in Unknown state"),
crdResponse: errors.New("error getting clusters crd: crd not found"),
wantErr: explodingClusterError,
},
{
name: "ValidationEtcdImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.externalEtcdConfiguration.count is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ExternalEtcdConfiguration.Count++
},
},
{
name: "ValidationControlPlaneImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.controlPlaneConfiguration.endpoint is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "2.3.4.5"
},
},
{
name: "ValidationAwsIamRegionImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("aws iam identity provider is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.AWSIamConfig.Spec.AWSRegion = "us-east-2"
},
},
{
name: "ValidationAwsIamBackEndModeImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("aws iam identity provider is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.AWSIamConfig.Spec.BackendMode = append(s.AWSIamConfig.Spec.BackendMode, "us-east-2")
},
},
{
name: "ValidationAwsIamPartitionImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("aws iam identity provider is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.AWSIamConfig.Spec.Partition = "partition2"
},
},
{
name: "ValidationAwsIamNameImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("aws iam identity provider is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.IdentityProviderRefs[1] = anywherev1.Ref{
Kind: anywherev1.AWSIamConfigKind,
Name: "aws-iam2",
}
},
},
{
name: "ValidationAwsIamKindImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("aws iam identity provider is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.IdentityProviderRefs[0] = anywherev1.Ref{
Kind: anywherev1.OIDCConfigKind,
Name: "oidc",
}
},
},
{
name: "ValidationAwsIamKindImmutableSwapOrder",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.IdentityProviderRefs[1] = anywherev1.Ref{
Kind: anywherev1.AWSIamConfigKind,
Name: "aws-iam",
}
s.Cluster.Spec.IdentityProviderRefs[0] = anywherev1.Ref{
Kind: anywherev1.OIDCConfigKind,
Name: "oidc",
}
},
},
{
name: "ValidationGitOpsNamespaceImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("gitOps spec.flux.github.fluxSystemNamespace is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.GitOpsConfig.Spec.Flux.Github.FluxSystemNamespace = "new-namespace"
},
},
{
name: "ValidationGitOpsBranchImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("gitOps spec.flux.github.branch is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.GitOpsConfig.Spec.Flux.Github.Branch = "new-branch"
},
},
{
name: "ValidationGitOpsOwnerImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("gitOps spec.flux.github.owner is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.GitOpsConfig.Spec.Flux.Github.Owner = "new-owner"
},
},
{
name: "ValidationGitOpsRepositoryImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("gitOps spec.flux.github.repository is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.GitOpsConfig.Spec.Flux.Github.Repository = "new-repository"
},
},
{
name: "ValidationGitOpsPathImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("gitOps spec.flux.github.clusterConfigPath is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.GitOpsConfig.Spec.Flux.Github.ClusterConfigPath = "new-path"
},
},
{
name: "ValidationGitOpsPersonalImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("gitOps spec.flux.github.personal is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.GitOpsConfig.Spec.Flux.Github.Personal = !s.GitOpsConfig.Spec.Flux.Github.Personal
},
},
{
name: "ValidationOIDCClientIdMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.ClientId = "new-client-id"
},
},
{
name: "ValidationOIDCGroupsClaimMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.GroupsClaim = "new-groups-claim"
},
},
{
name: "ValidationOIDCGroupsPrefixMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.GroupsPrefix = "new-groups-prefix"
},
},
{
name: "ValidationOIDCIssuerUrlMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.IssuerUrl = "new-issuer-url"
},
},
{
name: "ValidationOIDCUsernameClaimMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.UsernameClaim = "new-username-claim"
},
},
{
name: "ValidationOIDCUsernamePrefixMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.UsernamePrefix = "new-username-prefix"
},
},
{
name: "ValidationOIDCRequiredClaimsMutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: nil,
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.OIDCConfig.Spec.RequiredClaims[0].Claim = "new-groups-claim"
},
},
{
name: "ValidationClusterNetworkPodsImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.clusterNetwork.Pods is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Pods = anywherev1.Pods{}
},
},
{
name: "ValidationClusterNetworkServicesImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.clusterNetwork.Services is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.Services = anywherev1.Services{}
},
},
{
name: "ValidationClusterNetworkDNSImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.clusterNetwork.DNS is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ClusterNetwork.DNS = anywherev1.DNS{}
},
},
{
name: "ValidationProxyConfigurationImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.proxyConfiguration is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ProxyConfiguration = &anywherev1.ProxyConfiguration{
HttpProxy: "httpproxy2",
HttpsProxy: "httpsproxy2",
NoProxy: []string{
"noproxy3",
},
}
},
},
{
name: "ValidationEtcdConfigReplicasImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.externalEtcdConfiguration.count is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
				s.Cluster.Spec.ExternalEtcdConfiguration.Count++
s.Cluster.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: "vsphere test",
}
},
},
{
name: "ValidationEtcdConfigPreviousSpecEmpty",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("adding or removing external etcd during upgrade is not supported"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ExternalEtcdConfiguration = nil
s.Cluster.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: "vsphere test",
}
},
},
{
name: "ValidationManagementImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: string(anywherev1.Kube119),
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("management flag is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.SetManagedBy(fmt.Sprintf("%s-1", s.Cluster.ManagedBy()))
},
},
{
name: "ValidationManagementClusterNameImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: string(anywherev1.Kube119),
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("management cluster name is immutable"),
modifyExistingSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ManagementCluster.Name = fmt.Sprintf("%s-1", s.Cluster.ManagedBy())
},
modifyDefaultSpecFunc: func(s *cluster.Spec) {
s.Cluster.Spec.ManagementCluster.Name = fmt.Sprintf("%s-2", s.Cluster.ManagedBy())
},
},
}
defaultControlPlane := anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Name: "test",
Kind: "VSphereMachineConfig",
},
}
defaultETCD := &anywherev1.ExternalEtcdConfiguration{
Count: 3,
}
defaultDatacenterSpec := anywherev1.VSphereDatacenterConfig{
Spec: anywherev1.VSphereDatacenterConfigSpec{
Datacenter: "datacenter!!!",
Network: "network",
Server: "server",
Thumbprint: "thumbprint",
Insecure: false,
},
Status: anywherev1.VSphereDatacenterConfigStatus{},
}
defaultGitOps := &anywherev1.GitOpsConfig{
Spec: anywherev1.GitOpsConfigSpec{
Flux: anywherev1.Flux{
Github: anywherev1.Github{
Owner: "owner",
Repository: "repo",
FluxSystemNamespace: "flux-system",
Branch: "main",
ClusterConfigPath: "clusters/" + testclustername,
Personal: false,
},
},
},
}
defaultOIDC := &anywherev1.OIDCConfig{
Spec: anywherev1.OIDCConfigSpec{
ClientId: "client-id",
GroupsClaim: "groups-claim",
GroupsPrefix: "groups-prefix",
IssuerUrl: "issuer-url",
RequiredClaims: []anywherev1.OIDCConfigRequiredClaim{{
Claim: "claim",
Value: "value",
}},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
}
defaultAWSIAM := &anywherev1.AWSIamConfig{
Spec: anywherev1.AWSIamConfigSpec{
AWSRegion: "us-east-1",
MapRoles: []anywherev1.MapRoles{{
RoleARN: "roleARN",
Username: "username",
Groups: []string{"group1", "group2"},
}},
MapUsers: []anywherev1.MapUsers{{
UserARN: "userARN",
Username: "username",
Groups: []string{"group1", "group2"},
}},
Partition: "partition",
},
}
defaultClusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.ControlPlaneConfiguration = defaultControlPlane
s.Cluster.Spec.ExternalEtcdConfiguration = defaultETCD
s.Cluster.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: "vsphere test",
}
s.Cluster.Spec.IdentityProviderRefs = []anywherev1.Ref{
{
Kind: anywherev1.AWSIamConfigKind,
Name: "aws-iam",
},
{
Kind: anywherev1.OIDCConfigKind,
Name: "oidc",
},
}
s.Cluster.Spec.GitOpsRef = &anywherev1.Ref{
Kind: anywherev1.GitOpsConfigKind,
Name: "gitops test",
}
s.Cluster.Spec.ClusterNetwork = anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
Services: anywherev1.Services{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
DNS: anywherev1.DNS{
ResolvConf: &anywherev1.ResolvConf{Path: "file.conf"},
},
}
s.Cluster.Spec.ProxyConfiguration = &anywherev1.ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{
"noproxy1",
"noproxy2",
},
}
s.Cluster.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: "bundles-28",
Namespace: constants.EksaSystemNamespace,
}
s.GitOpsConfig = defaultGitOps
s.OIDCConfig = defaultOIDC
s.AWSIamConfig = defaultAWSIAM
})
for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
_, ctx, workloadCluster, _ := validations.NewKubectl(t)
workloadCluster.KubeconfigFile = kubeconfigFilePath
workloadCluster.Name = testclustername
mockCtrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(mockCtrl)
tlsValidator := mocks.NewMockTlsValidator(mockCtrl)
provider := mockproviders.NewMockProvider(mockCtrl)
clusterSpec := defaultClusterSpec.DeepCopy()
if tc.modifyDefaultSpecFunc != nil {
tc.modifyDefaultSpecFunc(clusterSpec)
}
opts := &validations.Opts{
Kubectl: k,
Spec: clusterSpec,
WorkloadCluster: workloadCluster,
ManagementCluster: workloadCluster,
Provider: provider,
TLSValidator: tlsValidator,
}
clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.KubernetesVersion(tc.upgradeVersion)
existingClusterSpec := defaultClusterSpec.DeepCopy()
existingClusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.KubernetesVersion(tc.clusterVersion)
existingProviderSpec := defaultDatacenterSpec.DeepCopy()
if tc.modifyExistingSpecFunc != nil {
tc.modifyExistingSpecFunc(existingClusterSpec)
}
bundlesResponse := &releasev1alpha1.Bundles{
Spec: releasev1alpha1.BundlesSpec{
Number: 28,
},
}
provider.EXPECT().DatacenterConfig(clusterSpec).Return(existingProviderSpec).MaxTimes(1)
provider.EXPECT().ValidateNewSpec(ctx, workloadCluster, clusterSpec).Return(nil).MaxTimes(1)
k.EXPECT().GetEksaVSphereDatacenterConfig(ctx, clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), gomock.Any()).Return(existingProviderSpec, nil).MaxTimes(1)
k.EXPECT().ValidateControlPlaneNodes(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(tc.cpResponse)
k.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
k.EXPECT().ValidateWorkerNodes(ctx, workloadCluster.Name, workloadCluster.KubeconfigFile).Return(tc.workerResponse)
k.EXPECT().ValidateNodes(ctx, kubeconfigFilePath).Return(tc.nodeResponse)
k.EXPECT().ValidateClustersCRD(ctx, workloadCluster).Return(tc.crdResponse)
k.EXPECT().GetClusters(ctx, workloadCluster).Return(tc.getClusterResponse, nil)
k.EXPECT().GetEksaCluster(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(existingClusterSpec.Cluster, nil).MaxTimes(2)
if opts.Spec.Cluster.IsManaged() {
k.EXPECT().GetEksaCluster(ctx, workloadCluster, workloadCluster.Name).Return(existingClusterSpec.Cluster, nil).MaxTimes(2)
k.EXPECT().GetBundles(ctx, workloadCluster.KubeconfigFile, existingClusterSpec.Cluster.Spec.BundlesRef.Name, existingClusterSpec.Cluster.Spec.BundlesRef.Namespace).Return(bundlesResponse, nil)
}
k.EXPECT().GetEksaGitOpsConfig(ctx, clusterSpec.Cluster.Spec.GitOpsRef.Name, gomock.Any(), gomock.Any()).Return(existingClusterSpec.GitOpsConfig, nil).MaxTimes(1)
k.EXPECT().GetEksaOIDCConfig(ctx, clusterSpec.Cluster.Spec.IdentityProviderRefs[1].Name, gomock.Any(), gomock.Any()).Return(existingClusterSpec.OIDCConfig, nil).MaxTimes(1)
k.EXPECT().GetEksaAWSIamConfig(ctx, clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, gomock.Any(), gomock.Any()).Return(existingClusterSpec.AWSIamConfig, nil).MaxTimes(1)
upgradeValidations := upgradevalidations.New(opts)
err := validations.ProcessValidationResults(upgradeValidations.PreflightValidations(ctx))
			if (err == nil) != (tc.wantErr == nil) || (err != nil && err.Error() != tc.wantErr.Error()) {
t.Errorf("%s want err=%v\n got err=%v\n", tc.name, tc.wantErr, err)
}
})
}
}
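// composeError wraps the given messages in a ValidationError, mirroring the
// aggregate error produced by ProcessValidationResults.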
func composeError(msgs ...string) *validations.ValidationError {
	return &validations.ValidationError{Errs: append([]string{}, msgs...)}
}
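// explodingClusterError is the aggregate failure expected when every
// preflight check fails at once.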
var explodingClusterError = composeError(
"control plane nodes are not ready",
"2 worker nodes are not ready",
"node test-node is not ready, currently in Unknown state",
"error getting clusters crd: crd not found",
"couldn't find CAPI cluster object for cluster with name testcluster",
"spec: Invalid value: \"1.20\": only +1 minor version skew is supported, minor version skew detected 2",
)
func TestPreFlightValidationsGit(t *testing.T) {
tests := []struct {
name string
clusterVersion string
upgradeVersion string
getClusterResponse []types.CAPICluster
cpResponse error
workerResponse error
nodeResponse error
crdResponse error
wantErr error
modifyFunc func(s *cluster.Spec)
}{
{
name: "ValidationFluxSshKeyAlgoImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("fluxConfig spec.fluxConfig.spec.git.sshKeyAlgorithm is immutable"),
modifyFunc: func(s *cluster.Spec) {
s.FluxConfig.Spec.Git.SshKeyAlgorithm = "rsa2"
},
},
{
name: "ValidationFluxRepoUrlImmutable",
clusterVersion: "v1.19.16-eks-1-19-4",
upgradeVersion: "1.19",
getClusterResponse: goodClusterResponse,
cpResponse: nil,
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("fluxConfig spec.fluxConfig.spec.git.repositoryUrl is immutable"),
modifyFunc: func(s *cluster.Spec) {
s.FluxConfig.Spec.Git.RepositoryUrl = "test2"
},
},
}
defaultControlPlane := anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Name: "test",
Kind: "VSphereMachineConfig",
},
}
defaultETCD := &anywherev1.ExternalEtcdConfiguration{
Count: 3,
}
defaultDatacenterSpec := anywherev1.VSphereDatacenterConfig{
Spec: anywherev1.VSphereDatacenterConfigSpec{
Datacenter: "datacenter!!!",
Network: "network",
Server: "server",
Thumbprint: "thumbprint",
Insecure: false,
},
Status: anywherev1.VSphereDatacenterConfigStatus{},
}
defaultFlux := &anywherev1.FluxConfig{
Spec: anywherev1.FluxConfigSpec{
Git: &anywherev1.GitProviderConfig{
RepositoryUrl: "test",
SshKeyAlgorithm: "rsa",
},
},
}
defaultOIDC := &anywherev1.OIDCConfig{
Spec: anywherev1.OIDCConfigSpec{
ClientId: "client-id",
GroupsClaim: "groups-claim",
GroupsPrefix: "groups-prefix",
IssuerUrl: "issuer-url",
RequiredClaims: []anywherev1.OIDCConfigRequiredClaim{{
Claim: "claim",
Value: "value",
}},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
}
defaultAWSIAM := &anywherev1.AWSIamConfig{
Spec: anywherev1.AWSIamConfigSpec{
AWSRegion: "us-east-1",
MapRoles: []anywherev1.MapRoles{{
RoleARN: "roleARN",
Username: "username",
Groups: []string{"group1", "group2"},
}},
MapUsers: []anywherev1.MapUsers{{
UserARN: "userARN",
Username: "username",
Groups: []string{"group1", "group2"},
}},
Partition: "partition",
},
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = testclustername
s.Cluster.Spec.ControlPlaneConfiguration = defaultControlPlane
s.Cluster.Spec.ExternalEtcdConfiguration = defaultETCD
s.Cluster.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: "vsphere test",
}
s.Cluster.Spec.IdentityProviderRefs = []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "oidc",
},
{
Kind: anywherev1.AWSIamConfigKind,
Name: "aws-iam",
},
}
s.Cluster.Spec.GitOpsRef = &anywherev1.Ref{
Kind: anywherev1.FluxConfigKind,
Name: "flux test",
}
s.Cluster.Spec.ClusterNetwork = anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
Services: anywherev1.Services{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
DNS: anywherev1.DNS{
ResolvConf: &anywherev1.ResolvConf{Path: "file.conf"},
},
}
s.Cluster.Spec.ProxyConfiguration = &anywherev1.ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{
"noproxy1",
"noproxy2",
},
}
s.OIDCConfig = defaultOIDC
s.AWSIamConfig = defaultAWSIAM
s.FluxConfig = defaultFlux
})
for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
workloadCluster := &types.Cluster{}
ctx := context.Background()
workloadCluster.KubeconfigFile = kubeconfigFilePath
workloadCluster.Name = testclustername
mockCtrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(mockCtrl)
tlsValidator := mocks.NewMockTlsValidator(mockCtrl)
cliConfig := &config.CliConfig{
GitPrivateKeyFile: "testdata/git_nonempty_private_key",
GitSshKeyPassphrase: "test",
GitKnownHostsFile: "testdata/git_nonempty_ssh_known_hosts",
}
provider := mockproviders.NewMockProvider(mockCtrl)
opts := &validations.Opts{
Kubectl: k,
Spec: clusterSpec,
WorkloadCluster: workloadCluster,
ManagementCluster: workloadCluster,
Provider: provider,
TLSValidator: tlsValidator,
CliConfig: cliConfig,
}
clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.KubernetesVersion(tc.upgradeVersion)
existingClusterSpec := clusterSpec.DeepCopy()
existingProviderSpec := defaultDatacenterSpec.DeepCopy()
if tc.modifyFunc != nil {
tc.modifyFunc(existingClusterSpec)
}
provider.EXPECT().DatacenterConfig(clusterSpec).Return(existingProviderSpec).MaxTimes(1)
provider.EXPECT().ValidateNewSpec(ctx, workloadCluster, clusterSpec).Return(nil).MaxTimes(1)
k.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
k.EXPECT().GetEksaVSphereDatacenterConfig(ctx, clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), gomock.Any()).Return(existingProviderSpec, nil).MaxTimes(1)
k.EXPECT().ValidateControlPlaneNodes(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(tc.cpResponse)
k.EXPECT().ValidateWorkerNodes(ctx, workloadCluster.Name, workloadCluster.KubeconfigFile).Return(tc.workerResponse)
k.EXPECT().ValidateNodes(ctx, kubeconfigFilePath).Return(tc.nodeResponse)
k.EXPECT().ValidateClustersCRD(ctx, workloadCluster).Return(tc.crdResponse)
k.EXPECT().GetClusters(ctx, workloadCluster).Return(tc.getClusterResponse, nil)
k.EXPECT().GetEksaCluster(ctx, workloadCluster, clusterSpec.Cluster.Name).Return(existingClusterSpec.Cluster, nil).Times(2)
k.EXPECT().GetEksaFluxConfig(ctx, clusterSpec.Cluster.Spec.GitOpsRef.Name, gomock.Any(), gomock.Any()).Return(existingClusterSpec.FluxConfig, nil).MaxTimes(1)
k.EXPECT().GetEksaOIDCConfig(ctx, clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, gomock.Any(), gomock.Any()).Return(existingClusterSpec.OIDCConfig, nil).MaxTimes(1)
k.EXPECT().GetEksaAWSIamConfig(ctx, clusterSpec.Cluster.Spec.IdentityProviderRefs[1].Name, gomock.Any(), gomock.Any()).Return(existingClusterSpec.AWSIamConfig, nil).MaxTimes(1)
upgradeValidations := upgradevalidations.New(opts)
err := validations.ProcessValidationResults(upgradeValidations.PreflightValidations(ctx))
if !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%s want err=%v\n got err=%v\n", tc.name, tc.wantErr, err)
}
})
}
}
| 1,392 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"fmt"
"strings"
)
// String values of the supported validation names that can be skipped.
const (
PDB = "pod-disruption"
)
// SkippableValidations represents all the validations we offer for users to skip.
var SkippableValidations = []string{
PDB,
}
// validSkippableValidationsMap returns a map with every valid skippable validation as a key, each defaulting to false; a validation is skipped only when its value is later set to true.
func validSkippableValidationsMap() map[string]bool {
validationsMap := make(map[string]bool, len(SkippableValidations))
for i := range SkippableValidations {
validationsMap[SkippableValidations[i]] = false
}
return validationsMap
}
// ValidateSkippableUpgradeValidation validates that the provided validation names are supported by EKS-A as skippable during upgrades, and returns a map marking each requested validation as skipped.
func ValidateSkippableUpgradeValidation(skippedValidations []string) (map[string]bool, error) {
svMap := validSkippableValidationsMap()
for i := range skippedValidations {
validationName := skippedValidations[i]
_, ok := svMap[validationName]
if !ok {
return nil, fmt.Errorf("invalid validation name to be skipped. The supported upgrade validations that can be skipped using --skip-validations are %s", strings.Join(SkippableValidations[:], ","))
}
svMap[validationName] = true
}
return svMap, nil
}
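// Example wiring (a sketch; flagValue and the flag parsing are assumptions,
// but the returned map is what PreflightValidations consults through
// Opts.SkippedValidations):
//
//	skipped, err := ValidateSkippableUpgradeValidation(strings.Split(flagValue, ","))
//	if err != nil {
//		return err
//	}
//	opts.SkippedValidations = skipped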
| 44 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"fmt"
"reflect"
"strings"
"testing"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
)
func TestValidateSkippableUpgradeValidation(t *testing.T) {
tests := []struct {
name string
want map[string]bool
wantErr error
skippedValidations []string
}{
{
name: "invalid upgrade validation param",
want: nil,
wantErr: fmt.Errorf("invalid validation name to be skipped. The supported upgrade validations that can be skipped using --skip-validations are %s", strings.Join(upgradevalidations.SkippableValidations[:], ",")),
skippedValidations: []string{"test"},
},
{
name: "valid upgrade validation param",
want: map[string]bool{
upgradevalidations.PDB: true,
},
wantErr: nil,
skippedValidations: []string{upgradevalidations.PDB},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := upgradevalidations.ValidateSkippableUpgradeValidation(tt.skippedValidations)
if !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("ValidateSkippableUpgradeValidation() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ValidateSkippableUpgradeValidation() = %v, want %v", got, tt.want)
}
})
}
}
| 47 |