repo_name (string, 1–52 chars) | repo_creator (string, 6 classes) | programming_language (string, 4 classes) | code (string, 0–9.68M chars) | num_lines (int64, 1–234k)
---|---|---|---|---|
eks-anywhere | aws | Go | package diagnostics
import (
_ "embed"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/providers"
)
type EksaDiagnosticBundleFactoryOpts struct {
AnalyzerFactory AnalyzerFactory
Client BundleClient
CollectorFactory CollectorFactory
Kubectl *executables.Kubectl
Writer filewriter.FileWriter
}
type eksaDiagnosticBundleFactory struct {
analyzerFactory AnalyzerFactory
client BundleClient
collectorFactory CollectorFactory
kubectl *executables.Kubectl
writer filewriter.FileWriter
}
func NewFactory(opts EksaDiagnosticBundleFactoryOpts) *eksaDiagnosticBundleFactory {
return &eksaDiagnosticBundleFactory{
analyzerFactory: opts.AnalyzerFactory,
client: opts.Client,
collectorFactory: opts.CollectorFactory,
kubectl: opts.Kubectl,
writer: opts.Writer,
}
}
func (f *eksaDiagnosticBundleFactory) DiagnosticBundle(spec *cluster.Spec, provider providers.Provider, kubeconfig string, bundlePath string) (DiagnosticBundle, error) {
if bundlePath == "" && spec != nil {
b, err := f.DiagnosticBundleWorkloadCluster(spec, provider, kubeconfig)
return b, err
}
return f.DiagnosticBundleCustom(kubeconfig, bundlePath), nil
}
func (f *eksaDiagnosticBundleFactory) DiagnosticBundleManagementCluster(spec *cluster.Spec, kubeconfig string) (DiagnosticBundle, error) {
return newDiagnosticBundleManagementCluster(f.analyzerFactory, f.collectorFactory, spec, f.client, f.kubectl, kubeconfig, f.writer)
}
func (f *eksaDiagnosticBundleFactory) DiagnosticBundleWorkloadCluster(spec *cluster.Spec, provider providers.Provider, kubeconfig string) (DiagnosticBundle, error) {
return newDiagnosticBundleFromSpec(f.analyzerFactory, f.collectorFactory, spec, provider, f.client, f.kubectl, kubeconfig, f.writer)
}
func (f *eksaDiagnosticBundleFactory) DiagnosticBundleDefault() DiagnosticBundle {
return newDiagnosticBundleDefault(f.analyzerFactory, f.collectorFactory)
}
func (f *eksaDiagnosticBundleFactory) DiagnosticBundleCustom(kubeconfig string, bundlePath string) DiagnosticBundle {
return newDiagnosticBundleCustom(f.analyzerFactory, f.collectorFactory, f.client, f.kubectl, bundlePath, kubeconfig, f.writer)
}
| 61 |
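A minimal wiring sketch for the factory above, assuming the caller already has the dependencies (analyzer/collector factories, bundle client, kubectl executable, file writer) from elsewhere in the codebase; the function name is hypothetical. With an empty bundlePath and a non-nil spec, `DiagnosticBundle` delegates to `DiagnosticBundleWorkloadCluster`.

```go
package diagnosticsexample

import (
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/diagnostics"
	"github.com/aws/eks-anywhere/pkg/providers"
)

// buildWorkloadBundle builds the factory from its dependencies and asks it
// for a bundle. The empty bundlePath selects the workload-cluster path.
func buildWorkloadBundle(
	opts diagnostics.EksaDiagnosticBundleFactoryOpts,
	spec *cluster.Spec,
	provider providers.Provider,
	kubeconfig string,
) (diagnostics.DiagnosticBundle, error) {
	factory := diagnostics.NewFactory(opts)
	return factory.DiagnosticBundle(spec, provider, kubeconfig, "")
}
```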
eks-anywhere | aws | Go | package diagnostics
import (
"context"
"time"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/providers"
)
type BundleClient interface {
Collect(ctx context.Context, bundlePath string, sinceTime *time.Time, kubeconfig string) (archivePath string, err error)
Analyze(ctx context.Context, bundleSpecPath string, archivePath string) ([]*executables.SupportBundleAnalysis, error)
}
type DiagnosticBundleFactory interface {
DiagnosticBundle(spec *cluster.Spec, provider providers.Provider, kubeconfig string, bundlePath string) (DiagnosticBundle, error)
DiagnosticBundleWorkloadCluster(spec *cluster.Spec, provider providers.Provider, kubeconfig string) (DiagnosticBundle, error)
DiagnosticBundleManagementCluster(spec *cluster.Spec, kubeconfig string) (DiagnosticBundle, error)
DiagnosticBundleDefault() DiagnosticBundle
DiagnosticBundleCustom(kubeconfig string, bundlePath string) DiagnosticBundle
}
type DiagnosticBundle interface {
PrintBundleConfig() error
WriteBundleConfig() error
PrintAnalysis() error
WriteAnalysisToFile() (path string, err error)
CollectAndAnalyze(ctx context.Context, sinceTimeValue *time.Time) error
WithDefaultAnalyzers() *EksaDiagnosticBundle
WithDefaultCollectors() *EksaDiagnosticBundle
WithFileCollectors(paths []string) *EksaDiagnosticBundle
WithDatacenterConfig(config v1alpha1.Ref, spec *cluster.Spec) *EksaDiagnosticBundle
WithOidcConfig(config *v1alpha1.OIDCConfig) *EksaDiagnosticBundle
WithExternalEtcd(config *v1alpha1.ExternalEtcdConfiguration) *EksaDiagnosticBundle
WithGitOpsConfig(config *v1alpha1.GitOpsConfig) *EksaDiagnosticBundle
WithMachineConfigs(configs []providers.MachineConfig) *EksaDiagnosticBundle
WithLogTextAnalyzers() *EksaDiagnosticBundle
}
type AnalyzerFactory interface {
DefaultAnalyzers() []*Analyze
EksaGitopsAnalyzers() []*Analyze
EksaLogTextAnalyzers(collectors []*Collect) []*Analyze
EksaOidcAnalyzers() []*Analyze
EksaExternalEtcdAnalyzers() []*Analyze
DataCenterConfigAnalyzers(datacenter v1alpha1.Ref) []*Analyze
ManagementClusterAnalyzers() []*Analyze
PackageAnalyzers() []*Analyze
}
// CollectorFactory generates support-bundle collectors.
type CollectorFactory interface {
PackagesCollectors() []*Collect
DefaultCollectors() []*Collect
FileCollectors(paths []string) []*Collect
ManagementClusterCollectors() []*Collect
EksaHostCollectors(configs []providers.MachineConfig) []*Collect
DataCenterConfigCollectors(datacenter v1alpha1.Ref, spec *cluster.Spec) []*Collect
}
| 63 |
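A sketch of how the `DiagnosticBundle` interface is meant to be consumed once a bundle has been obtained from one of the factory methods: persist the config, collect and analyze, then print the results. The two-hour lookback window is illustrative only.

```go
package diagnosticsexample

import (
	"context"
	"time"

	"github.com/aws/eks-anywhere/pkg/diagnostics"
)

// runBundle writes the bundle config, collects and analyzes with a bounded
// lookback window, and prints the analysis.
func runBundle(ctx context.Context, bundle diagnostics.DiagnosticBundle) error {
	if err := bundle.WriteBundleConfig(); err != nil {
		return err
	}
	since := time.Now().Add(-2 * time.Hour) // illustrative collection window
	if err := bundle.CollectAndAnalyze(ctx, &since); err != nil {
		return err
	}
	return bundle.PrintAnalysis()
}
```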
eks-anywhere | aws | Go | package diagnostics
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
type supportBundle struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec supportBundleSpec `json:"spec,omitempty"`
}
type supportBundleSpec struct {
Collectors []*Collect `json:"collectors,omitempty"`
Analyzers []*Analyze `json:"analyzers,omitempty"`
}
type singleOutcome struct {
When string `json:"when,omitempty"`
Message string `json:"message,omitempty"`
URI string `json:"uri,omitempty"`
}
type outcome struct {
Fail *singleOutcome `json:"fail,omitempty"`
Warn *singleOutcome `json:"warn,omitempty"`
Pass *singleOutcome `json:"pass,omitempty"`
}
| 28 |
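A package-internal sketch of the wire shape these unexported types serialize to. The Kind and APIVersion values are assumptions based on the upstream support-bundle format these structs mirror, not taken from this file.

```go
package diagnostics

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// printExampleBundle renders an empty bundle to show the JSON shape.
// Kind and APIVersion here are assumed values for illustration.
func printExampleBundle() {
	b := supportBundle{
		TypeMeta:   metav1.TypeMeta{Kind: "SupportBundle", APIVersion: "troubleshoot.sh/v1beta2"},
		ObjectMeta: metav1.ObjectMeta{Name: "example-bundle"},
		Spec:       supportBundleSpec{},
	}
	out, _ := json.MarshalIndent(b, "", "  ")
	fmt.Println(string(out))
}
```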
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/diagnostics/interfaces.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
time "time"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
diagnostics "github.com/aws/eks-anywhere/pkg/diagnostics"
executables "github.com/aws/eks-anywhere/pkg/executables"
providers "github.com/aws/eks-anywhere/pkg/providers"
gomock "github.com/golang/mock/gomock"
)
// MockBundleClient is a mock of BundleClient interface.
type MockBundleClient struct {
ctrl *gomock.Controller
recorder *MockBundleClientMockRecorder
}
// MockBundleClientMockRecorder is the mock recorder for MockBundleClient.
type MockBundleClientMockRecorder struct {
mock *MockBundleClient
}
// NewMockBundleClient creates a new mock instance.
func NewMockBundleClient(ctrl *gomock.Controller) *MockBundleClient {
mock := &MockBundleClient{ctrl: ctrl}
mock.recorder = &MockBundleClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockBundleClient) EXPECT() *MockBundleClientMockRecorder {
return m.recorder
}
// Analyze mocks base method.
func (m *MockBundleClient) Analyze(ctx context.Context, bundleSpecPath, archivePath string) ([]*executables.SupportBundleAnalysis, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Analyze", ctx, bundleSpecPath, archivePath)
ret0, _ := ret[0].([]*executables.SupportBundleAnalysis)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Analyze indicates an expected call of Analyze.
func (mr *MockBundleClientMockRecorder) Analyze(ctx, bundleSpecPath, archivePath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Analyze", reflect.TypeOf((*MockBundleClient)(nil).Analyze), ctx, bundleSpecPath, archivePath)
}
// Collect mocks base method.
func (m *MockBundleClient) Collect(ctx context.Context, bundlePath string, sinceTime *time.Time, kubeconfig string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Collect", ctx, bundlePath, sinceTime, kubeconfig)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Collect indicates an expected call of Collect.
func (mr *MockBundleClientMockRecorder) Collect(ctx, bundlePath, sinceTime, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Collect", reflect.TypeOf((*MockBundleClient)(nil).Collect), ctx, bundlePath, sinceTime, kubeconfig)
}
// MockDiagnosticBundleFactory is a mock of DiagnosticBundleFactory interface.
type MockDiagnosticBundleFactory struct {
ctrl *gomock.Controller
recorder *MockDiagnosticBundleFactoryMockRecorder
}
// MockDiagnosticBundleFactoryMockRecorder is the mock recorder for MockDiagnosticBundleFactory.
type MockDiagnosticBundleFactoryMockRecorder struct {
mock *MockDiagnosticBundleFactory
}
// NewMockDiagnosticBundleFactory creates a new mock instance.
func NewMockDiagnosticBundleFactory(ctrl *gomock.Controller) *MockDiagnosticBundleFactory {
mock := &MockDiagnosticBundleFactory{ctrl: ctrl}
mock.recorder = &MockDiagnosticBundleFactoryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDiagnosticBundleFactory) EXPECT() *MockDiagnosticBundleFactoryMockRecorder {
return m.recorder
}
// DiagnosticBundle mocks base method.
func (m *MockDiagnosticBundleFactory) DiagnosticBundle(spec *cluster.Spec, provider providers.Provider, kubeconfig, bundlePath string) (diagnostics.DiagnosticBundle, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiagnosticBundle", spec, provider, kubeconfig, bundlePath)
ret0, _ := ret[0].(diagnostics.DiagnosticBundle)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DiagnosticBundle indicates an expected call of DiagnosticBundle.
func (mr *MockDiagnosticBundleFactoryMockRecorder) DiagnosticBundle(spec, provider, kubeconfig, bundlePath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiagnosticBundle", reflect.TypeOf((*MockDiagnosticBundleFactory)(nil).DiagnosticBundle), spec, provider, kubeconfig, bundlePath)
}
// DiagnosticBundleCustom mocks base method.
func (m *MockDiagnosticBundleFactory) DiagnosticBundleCustom(kubeconfig, bundlePath string) diagnostics.DiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiagnosticBundleCustom", kubeconfig, bundlePath)
ret0, _ := ret[0].(diagnostics.DiagnosticBundle)
return ret0
}
// DiagnosticBundleCustom indicates an expected call of DiagnosticBundleCustom.
func (mr *MockDiagnosticBundleFactoryMockRecorder) DiagnosticBundleCustom(kubeconfig, bundlePath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiagnosticBundleCustom", reflect.TypeOf((*MockDiagnosticBundleFactory)(nil).DiagnosticBundleCustom), kubeconfig, bundlePath)
}
// DiagnosticBundleDefault mocks base method.
func (m *MockDiagnosticBundleFactory) DiagnosticBundleDefault() diagnostics.DiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiagnosticBundleDefault")
ret0, _ := ret[0].(diagnostics.DiagnosticBundle)
return ret0
}
// DiagnosticBundleDefault indicates an expected call of DiagnosticBundleDefault.
func (mr *MockDiagnosticBundleFactoryMockRecorder) DiagnosticBundleDefault() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiagnosticBundleDefault", reflect.TypeOf((*MockDiagnosticBundleFactory)(nil).DiagnosticBundleDefault))
}
// DiagnosticBundleManagementCluster mocks base method.
func (m *MockDiagnosticBundleFactory) DiagnosticBundleManagementCluster(spec *cluster.Spec, kubeconfig string) (diagnostics.DiagnosticBundle, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiagnosticBundleManagementCluster", spec, kubeconfig)
ret0, _ := ret[0].(diagnostics.DiagnosticBundle)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DiagnosticBundleManagementCluster indicates an expected call of DiagnosticBundleManagementCluster.
func (mr *MockDiagnosticBundleFactoryMockRecorder) DiagnosticBundleManagementCluster(spec, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiagnosticBundleManagementCluster", reflect.TypeOf((*MockDiagnosticBundleFactory)(nil).DiagnosticBundleManagementCluster), spec, kubeconfig)
}
// DiagnosticBundleWorkloadCluster mocks base method.
func (m *MockDiagnosticBundleFactory) DiagnosticBundleWorkloadCluster(spec *cluster.Spec, provider providers.Provider, kubeconfig string) (diagnostics.DiagnosticBundle, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiagnosticBundleWorkloadCluster", spec, provider, kubeconfig)
ret0, _ := ret[0].(diagnostics.DiagnosticBundle)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DiagnosticBundleWorkloadCluster indicates an expected call of DiagnosticBundleWorkloadCluster.
func (mr *MockDiagnosticBundleFactoryMockRecorder) DiagnosticBundleWorkloadCluster(spec, provider, kubeconfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiagnosticBundleWorkloadCluster", reflect.TypeOf((*MockDiagnosticBundleFactory)(nil).DiagnosticBundleWorkloadCluster), spec, provider, kubeconfig)
}
// MockDiagnosticBundle is a mock of DiagnosticBundle interface.
type MockDiagnosticBundle struct {
ctrl *gomock.Controller
recorder *MockDiagnosticBundleMockRecorder
}
// MockDiagnosticBundleMockRecorder is the mock recorder for MockDiagnosticBundle.
type MockDiagnosticBundleMockRecorder struct {
mock *MockDiagnosticBundle
}
// NewMockDiagnosticBundle creates a new mock instance.
func NewMockDiagnosticBundle(ctrl *gomock.Controller) *MockDiagnosticBundle {
mock := &MockDiagnosticBundle{ctrl: ctrl}
mock.recorder = &MockDiagnosticBundleMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDiagnosticBundle) EXPECT() *MockDiagnosticBundleMockRecorder {
return m.recorder
}
// CollectAndAnalyze mocks base method.
func (m *MockDiagnosticBundle) CollectAndAnalyze(ctx context.Context, sinceTimeValue *time.Time) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CollectAndAnalyze", ctx, sinceTimeValue)
ret0, _ := ret[0].(error)
return ret0
}
// CollectAndAnalyze indicates an expected call of CollectAndAnalyze.
func (mr *MockDiagnosticBundleMockRecorder) CollectAndAnalyze(ctx, sinceTimeValue interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectAndAnalyze", reflect.TypeOf((*MockDiagnosticBundle)(nil).CollectAndAnalyze), ctx, sinceTimeValue)
}
// PrintAnalysis mocks base method.
func (m *MockDiagnosticBundle) PrintAnalysis() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrintAnalysis")
ret0, _ := ret[0].(error)
return ret0
}
// PrintAnalysis indicates an expected call of PrintAnalysis.
func (mr *MockDiagnosticBundleMockRecorder) PrintAnalysis() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrintAnalysis", reflect.TypeOf((*MockDiagnosticBundle)(nil).PrintAnalysis))
}
// PrintBundleConfig mocks base method.
func (m *MockDiagnosticBundle) PrintBundleConfig() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrintBundleConfig")
ret0, _ := ret[0].(error)
return ret0
}
// PrintBundleConfig indicates an expected call of PrintBundleConfig.
func (mr *MockDiagnosticBundleMockRecorder) PrintBundleConfig() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrintBundleConfig", reflect.TypeOf((*MockDiagnosticBundle)(nil).PrintBundleConfig))
}
// WithDatacenterConfig mocks base method.
func (m *MockDiagnosticBundle) WithDatacenterConfig(config v1alpha1.Ref, spec *cluster.Spec) *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithDatacenterConfig", config, spec)
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithDatacenterConfig indicates an expected call of WithDatacenterConfig.
func (mr *MockDiagnosticBundleMockRecorder) WithDatacenterConfig(config, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithDatacenterConfig", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithDatacenterConfig), config, spec)
}
// WithDefaultAnalyzers mocks base method.
func (m *MockDiagnosticBundle) WithDefaultAnalyzers() *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithDefaultAnalyzers")
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithDefaultAnalyzers indicates an expected call of WithDefaultAnalyzers.
func (mr *MockDiagnosticBundleMockRecorder) WithDefaultAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithDefaultAnalyzers", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithDefaultAnalyzers))
}
// WithDefaultCollectors mocks base method.
func (m *MockDiagnosticBundle) WithDefaultCollectors() *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithDefaultCollectors")
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithDefaultCollectors indicates an expected call of WithDefaultCollectors.
func (mr *MockDiagnosticBundleMockRecorder) WithDefaultCollectors() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithDefaultCollectors", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithDefaultCollectors))
}
// WithExternalEtcd mocks base method.
func (m *MockDiagnosticBundle) WithExternalEtcd(config *v1alpha1.ExternalEtcdConfiguration) *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithExternalEtcd", config)
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithExternalEtcd indicates an expected call of WithExternalEtcd.
func (mr *MockDiagnosticBundleMockRecorder) WithExternalEtcd(config interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithExternalEtcd", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithExternalEtcd), config)
}
// WithFileCollectors mocks base method.
func (m *MockDiagnosticBundle) WithFileCollectors(paths []string) *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithFileCollectors", paths)
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithFileCollectors indicates an expected call of WithFileCollectors.
func (mr *MockDiagnosticBundleMockRecorder) WithFileCollectors(paths interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithFileCollectors", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithFileCollectors), paths)
}
// WithGitOpsConfig mocks base method.
func (m *MockDiagnosticBundle) WithGitOpsConfig(config *v1alpha1.GitOpsConfig) *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithGitOpsConfig", config)
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithGitOpsConfig indicates an expected call of WithGitOpsConfig.
func (mr *MockDiagnosticBundleMockRecorder) WithGitOpsConfig(config interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithGitOpsConfig", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithGitOpsConfig), config)
}
// WithLogTextAnalyzers mocks base method.
func (m *MockDiagnosticBundle) WithLogTextAnalyzers() *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithLogTextAnalyzers")
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithLogTextAnalyzers indicates an expected call of WithLogTextAnalyzers.
func (mr *MockDiagnosticBundleMockRecorder) WithLogTextAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithLogTextAnalyzers", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithLogTextAnalyzers))
}
// WithMachineConfigs mocks base method.
func (m *MockDiagnosticBundle) WithMachineConfigs(configs []providers.MachineConfig) *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithMachineConfigs", configs)
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithMachineConfigs indicates an expected call of WithMachineConfigs.
func (mr *MockDiagnosticBundleMockRecorder) WithMachineConfigs(configs interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithMachineConfigs", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithMachineConfigs), configs)
}
// WithOidcConfig mocks base method.
func (m *MockDiagnosticBundle) WithOidcConfig(config *v1alpha1.OIDCConfig) *diagnostics.EksaDiagnosticBundle {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithOidcConfig", config)
ret0, _ := ret[0].(*diagnostics.EksaDiagnosticBundle)
return ret0
}
// WithOidcConfig indicates an expected call of WithOidcConfig.
func (mr *MockDiagnosticBundleMockRecorder) WithOidcConfig(config interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithOidcConfig", reflect.TypeOf((*MockDiagnosticBundle)(nil).WithOidcConfig), config)
}
// WriteAnalysisToFile mocks base method.
func (m *MockDiagnosticBundle) WriteAnalysisToFile() (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteAnalysisToFile")
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WriteAnalysisToFile indicates an expected call of WriteAnalysisToFile.
func (mr *MockDiagnosticBundleMockRecorder) WriteAnalysisToFile() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteAnalysisToFile", reflect.TypeOf((*MockDiagnosticBundle)(nil).WriteAnalysisToFile))
}
// WriteBundleConfig mocks base method.
func (m *MockDiagnosticBundle) WriteBundleConfig() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteBundleConfig")
ret0, _ := ret[0].(error)
return ret0
}
// WriteBundleConfig indicates an expected call of WriteBundleConfig.
func (mr *MockDiagnosticBundleMockRecorder) WriteBundleConfig() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBundleConfig", reflect.TypeOf((*MockDiagnosticBundle)(nil).WriteBundleConfig))
}
// MockAnalyzerFactory is a mock of AnalyzerFactory interface.
type MockAnalyzerFactory struct {
ctrl *gomock.Controller
recorder *MockAnalyzerFactoryMockRecorder
}
// MockAnalyzerFactoryMockRecorder is the mock recorder for MockAnalyzerFactory.
type MockAnalyzerFactoryMockRecorder struct {
mock *MockAnalyzerFactory
}
// NewMockAnalyzerFactory creates a new mock instance.
func NewMockAnalyzerFactory(ctrl *gomock.Controller) *MockAnalyzerFactory {
mock := &MockAnalyzerFactory{ctrl: ctrl}
mock.recorder = &MockAnalyzerFactoryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockAnalyzerFactory) EXPECT() *MockAnalyzerFactoryMockRecorder {
return m.recorder
}
// DataCenterConfigAnalyzers mocks base method.
func (m *MockAnalyzerFactory) DataCenterConfigAnalyzers(datacenter v1alpha1.Ref) []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DataCenterConfigAnalyzers", datacenter)
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// DataCenterConfigAnalyzers indicates an expected call of DataCenterConfigAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) DataCenterConfigAnalyzers(datacenter interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataCenterConfigAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).DataCenterConfigAnalyzers), datacenter)
}
// DefaultAnalyzers mocks base method.
func (m *MockAnalyzerFactory) DefaultAnalyzers() []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DefaultAnalyzers")
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// DefaultAnalyzers indicates an expected call of DefaultAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) DefaultAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).DefaultAnalyzers))
}
// EksaExternalEtcdAnalyzers mocks base method.
func (m *MockAnalyzerFactory) EksaExternalEtcdAnalyzers() []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EksaExternalEtcdAnalyzers")
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// EksaExternalEtcdAnalyzers indicates an expected call of EksaExternalEtcdAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) EksaExternalEtcdAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EksaExternalEtcdAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).EksaExternalEtcdAnalyzers))
}
// EksaGitopsAnalyzers mocks base method.
func (m *MockAnalyzerFactory) EksaGitopsAnalyzers() []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EksaGitopsAnalyzers")
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// EksaGitopsAnalyzers indicates an expected call of EksaGitopsAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) EksaGitopsAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EksaGitopsAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).EksaGitopsAnalyzers))
}
// EksaLogTextAnalyzers mocks base method.
func (m *MockAnalyzerFactory) EksaLogTextAnalyzers(collectors []*diagnostics.Collect) []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EksaLogTextAnalyzers", collectors)
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// EksaLogTextAnalyzers indicates an expected call of EksaLogTextAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) EksaLogTextAnalyzers(collectors interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EksaLogTextAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).EksaLogTextAnalyzers), collectors)
}
// EksaOidcAnalyzers mocks base method.
func (m *MockAnalyzerFactory) EksaOidcAnalyzers() []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EksaOidcAnalyzers")
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// EksaOidcAnalyzers indicates an expected call of EksaOidcAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) EksaOidcAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EksaOidcAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).EksaOidcAnalyzers))
}
// ManagementClusterAnalyzers mocks base method.
func (m *MockAnalyzerFactory) ManagementClusterAnalyzers() []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ManagementClusterAnalyzers")
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// ManagementClusterAnalyzers indicates an expected call of ManagementClusterAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) ManagementClusterAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManagementClusterAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).ManagementClusterAnalyzers))
}
// PackageAnalyzers mocks base method.
func (m *MockAnalyzerFactory) PackageAnalyzers() []*diagnostics.Analyze {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PackageAnalyzers")
ret0, _ := ret[0].([]*diagnostics.Analyze)
return ret0
}
// PackageAnalyzers indicates an expected call of PackageAnalyzers.
func (mr *MockAnalyzerFactoryMockRecorder) PackageAnalyzers() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PackageAnalyzers", reflect.TypeOf((*MockAnalyzerFactory)(nil).PackageAnalyzers))
}
// MockCollectorFactory is a mock of CollectorFactory interface.
type MockCollectorFactory struct {
ctrl *gomock.Controller
recorder *MockCollectorFactoryMockRecorder
}
// MockCollectorFactoryMockRecorder is the mock recorder for MockCollectorFactory.
type MockCollectorFactoryMockRecorder struct {
mock *MockCollectorFactory
}
// NewMockCollectorFactory creates a new mock instance.
func NewMockCollectorFactory(ctrl *gomock.Controller) *MockCollectorFactory {
mock := &MockCollectorFactory{ctrl: ctrl}
mock.recorder = &MockCollectorFactoryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCollectorFactory) EXPECT() *MockCollectorFactoryMockRecorder {
return m.recorder
}
// DataCenterConfigCollectors mocks base method.
func (m *MockCollectorFactory) DataCenterConfigCollectors(datacenter v1alpha1.Ref, spec *cluster.Spec) []*diagnostics.Collect {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DataCenterConfigCollectors", datacenter, spec)
ret0, _ := ret[0].([]*diagnostics.Collect)
return ret0
}
// DataCenterConfigCollectors indicates an expected call of DataCenterConfigCollectors.
func (mr *MockCollectorFactoryMockRecorder) DataCenterConfigCollectors(datacenter, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataCenterConfigCollectors", reflect.TypeOf((*MockCollectorFactory)(nil).DataCenterConfigCollectors), datacenter, spec)
}
// DefaultCollectors mocks base method.
func (m *MockCollectorFactory) DefaultCollectors() []*diagnostics.Collect {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DefaultCollectors")
ret0, _ := ret[0].([]*diagnostics.Collect)
return ret0
}
// DefaultCollectors indicates an expected call of DefaultCollectors.
func (mr *MockCollectorFactoryMockRecorder) DefaultCollectors() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultCollectors", reflect.TypeOf((*MockCollectorFactory)(nil).DefaultCollectors))
}
// EksaHostCollectors mocks base method.
func (m *MockCollectorFactory) EksaHostCollectors(configs []providers.MachineConfig) []*diagnostics.Collect {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EksaHostCollectors", configs)
ret0, _ := ret[0].([]*diagnostics.Collect)
return ret0
}
// EksaHostCollectors indicates an expected call of EksaHostCollectors.
func (mr *MockCollectorFactoryMockRecorder) EksaHostCollectors(configs interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EksaHostCollectors", reflect.TypeOf((*MockCollectorFactory)(nil).EksaHostCollectors), configs)
}
// FileCollectors mocks base method.
func (m *MockCollectorFactory) FileCollectors(paths []string) []*diagnostics.Collect {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FileCollectors", paths)
ret0, _ := ret[0].([]*diagnostics.Collect)
return ret0
}
// FileCollectors indicates an expected call of FileCollectors.
func (mr *MockCollectorFactoryMockRecorder) FileCollectors(paths interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FileCollectors", reflect.TypeOf((*MockCollectorFactory)(nil).FileCollectors), paths)
}
// ManagementClusterCollectors mocks base method.
func (m *MockCollectorFactory) ManagementClusterCollectors() []*diagnostics.Collect {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ManagementClusterCollectors")
ret0, _ := ret[0].([]*diagnostics.Collect)
return ret0
}
// ManagementClusterCollectors indicates an expected call of ManagementClusterCollectors.
func (mr *MockCollectorFactoryMockRecorder) ManagementClusterCollectors() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManagementClusterCollectors", reflect.TypeOf((*MockCollectorFactory)(nil).ManagementClusterCollectors))
}
// PackagesCollectors mocks base method.
func (m *MockCollectorFactory) PackagesCollectors() []*diagnostics.Collect {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PackagesCollectors")
ret0, _ := ret[0].([]*diagnostics.Collect)
return ret0
}
// PackagesCollectors indicates an expected call of PackagesCollectors.
func (mr *MockCollectorFactoryMockRecorder) PackagesCollectors() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PackagesCollectors", reflect.TypeOf((*MockCollectorFactory)(nil).PackagesCollectors))
}
| 630 |
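An illustrative test showing the standard gomock flow against these generated mocks: register an expectation through `EXPECT()`, then make the matching call. The mocks import path is an assumption based on the generated source comment; matching the document's own tests, no explicit `ctrl.Finish()` is called (recent gomock versions register it via `t.Cleanup`).

```go
package mocks_test

import (
	"context"
	"testing"
	"time"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/diagnostics/mocks" // assumed import path
)

// TestCollectExpectation registers an expectation on the mock and then
// exercises it, checking the stubbed return values.
func TestCollectExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mocks.NewMockBundleClient(ctrl)
	ctx := context.Background()
	var since *time.Time // nil means no lower bound on log age

	client.EXPECT().
		Collect(ctx, "bundle.yaml", since, "cluster.kubeconfig").
		Return("support-bundle.tar.gz", nil)

	archive, err := client.Collect(ctx, "bundle.yaml", since, "cluster.kubeconfig")
	if err != nil || archive != "support-bundle.tar.gz" {
		t.Fatalf("unexpected result: %q, %v", archive, err)
	}
}
```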
eks-anywhere | aws | Go | package docker
import (
"context"
"fmt"
"sync"
)
type ConcurrentImageProcessor struct {
maxRoutines int
}
func NewConcurrentImageProcessor(maxRoutines int) *ConcurrentImageProcessor {
return &ConcurrentImageProcessor{maxRoutines: maxRoutines}
}
type ImageProcessor func(ctx context.Context, image string) error
func (c *ConcurrentImageProcessor) Process(ctx context.Context, images []string, process ImageProcessor) error {
contextWithCancel, abortRemainingJobs := context.WithCancel(ctx)
workers := min(len(images), c.maxRoutines)
workers = max(workers, 1)
jobsChan := make(chan job)
wg := &sync.WaitGroup{}
errors := make(chan error)
doneChan := make(chan struct{})
for i := 0; i < workers; i++ {
w := &worker{
jobs: jobsChan,
process: process,
waitGroup: wg,
errorReturn: errors,
}
wg.Add(1)
go w.start()
}
f := &feeder{
jobs: jobsChan,
images: images,
workersWaitGroup: wg,
done: doneChan,
}
go f.feed(contextWithCancel)
var firstError error
loop:
for {
select {
case <-doneChan:
break loop
case err := <-errors:
if firstError == nil {
firstError = err
abortRemainingJobs()
}
}
}
// By this point all workers are done, whether or not every job ran, so nothing
// is using the context anymore and this cancel is not strictly necessary.
// It is called anyway to satisfy the linter's check for a possible context leak.
abortRemainingJobs()
if firstError != nil {
return fmt.Errorf("image processor worker failed, rest of jobs were aborted: %v", firstError)
}
return nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
type job struct {
ctx context.Context
image string
}
type feeder struct {
jobs chan<- job
images []string
done chan<- struct{}
workersWaitGroup *sync.WaitGroup
}
func (f *feeder) feed(ctx context.Context) {
defer func() {
close(f.jobs)
f.workersWaitGroup.Wait()
close(f.done)
}()
for _, i := range f.images {
select {
case <-ctx.Done():
return
default:
j := job{
ctx: ctx,
image: i,
}
f.jobs <- j
}
}
}
type worker struct {
jobs <-chan job
process ImageProcessor
waitGroup *sync.WaitGroup
errorReturn chan<- error
}
func (w *worker) start() {
defer w.waitGroup.Done()
for j := range w.jobs {
if err := w.process(j.ctx, j.image); err != nil {
w.errorReturn <- err
}
}
}
| 137 |
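A short usage sketch for the processor above: three image names fanned out across two workers, with a print standing in for a real pull or push. On the first error, the processor cancels the context handed to the remaining jobs.

```go
package docker_test

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/docker"
)

// ExampleConcurrentImageProcessor processes a small image list with two
// concurrent workers; the process function here only prints.
func ExampleConcurrentImageProcessor() {
	processor := docker.NewConcurrentImageProcessor(2)
	images := []string{"image1:1", "image2:2", "image3:3"}
	err := processor.Process(context.Background(), images, func(ctx context.Context, image string) error {
		fmt.Println("processing", image)
		return nil
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}
```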
eks-anywhere | aws | Go | package docker_test
import (
"context"
"errors"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/docker"
)
func TestConcurrentImageProcessorProcessSuccess(t *testing.T) {
tests := []struct {
name string
images []string
maxRoutines int
}{
{
name: "more jobs than routines",
images: []string{"image1:1", "image2:2", "images3:3"},
maxRoutines: 2,
},
{
name: "same jobs than routines",
images: []string{"image1:1", "image2:2", "images3:3"},
maxRoutines: 3,
},
{
name: "less jobs than routines",
images: []string{"image1:1", "image2:2", "images3:3"},
maxRoutines: 4,
},
{
name: "zero routines",
images: []string{"image1:1", "image2:2", "images3:3"},
maxRoutines: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
processor := docker.NewConcurrentImageProcessor(tt.maxRoutines)
process := func(_ context.Context, _ string) error {
return nil
}
g.Expect(processor.Process(ctx, tt.images, process)).To(Succeed())
})
}
}
func TestConcurrentImageProcessorProcessError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
images := []string{"image1:1", "image2:2", "image3:3"}
processor := docker.NewConcurrentImageProcessor(2)
process := func(_ context.Context, i string) error {
if i == "image2:2" {
return errors.New("processing error")
}
return nil
}
g.Expect(processor.Process(ctx, images, process)).To(
MatchError(ContainSubstring("image processor worker failed, rest of jobs were aborted: processing error")),
)
}
func TestConcurrentImageProcessorProcessErrorWithJobsBeingCancelled(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
images := []string{"image1:1", "image2:2", "image3:3"}
processor := docker.NewConcurrentImageProcessor(2)
process := func(ctx context.Context, i string) error {
if i == "image2:2" {
return errors.New("processing error")
}
// Block until context gets cancelled to trigger the flow
// where jobs get cancelled after first error
<-ctx.Done()
return nil
}
g.Expect(processor.Process(ctx, images, process)).To(
MatchError(ContainSubstring("image processor worker failed, rest of jobs were aborted: processing error")),
)
}
func TestConcurrentImageProcessorProcessCancelParentContext(t *testing.T) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(context.Background())
images := []string{"image1:1", "image2:2", "image3:3"}
processor := docker.NewConcurrentImageProcessor(2)
process := func(ctx context.Context, i string) error {
<-ctx.Done()
return nil
}
cancel()
g.Expect(processor.Process(ctx, images, process)).To(Succeed())
}
| 114 |
eks-anywhere | aws | Go | package docker
import (
"context"
"github.com/aws/eks-anywhere/pkg/logger"
)
// ImageDiskSource implements the ImageSource interface, loading images and tags from
// a tarball into the local docker cache.
type ImageDiskSource struct {
client ImageDiskLoader
file string
}
func NewDiskSource(client ImageDiskLoader, file string) *ImageDiskSource {
return &ImageDiskSource{
client: client,
file: file,
}
}
// Load reads images and tags from a tarball into the local docker cache.
func (s *ImageDiskSource) Load(ctx context.Context, images ...string) error {
logger.Info("Loading images from disk")
return s.client.LoadFromFile(ctx, s.file)
}
// ImageDiskDestination implements the ImageDestination interface, writing images and tags
// from the local docker cache into a tarball.
type ImageDiskDestination struct {
client ImageDiskWriter
file string
}
func NewDiskDestination(client ImageDiskWriter, file string) *ImageDiskDestination {
return &ImageDiskDestination{
client: client,
file: file,
}
}
// Write creates a tarball including images and tags from the local docker cache.
func (s *ImageDiskDestination) Write(ctx context.Context, images ...string) error {
logger.Info("Writing images to disk")
return s.client.SaveToFile(ctx, s.file, images...)
}
| 48 |
eks-anywhere | aws | Go | package docker_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/docker"
"github.com/aws/eks-anywhere/pkg/docker/mocks"
)
func TestNewDiskSource(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockDockerClient(ctrl)
file := "file"
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
sourceLoader := docker.NewDiskSource(client, file)
client.EXPECT().LoadFromFile(ctx, file)
g.Expect(sourceLoader.Load(ctx, images...)).To(Succeed())
}
func TestNewDiskDestination(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockDockerClient(ctrl)
file := "file"
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
dstLoader := docker.NewDiskDestination(client, file)
client.EXPECT().SaveToFile(ctx, file, images[0], images[1])
g.Expect(dstLoader.Write(ctx, images...)).To(Succeed())
}
| 41 |
eks-anywhere | aws | Go | package docker
import (
"context"
"fmt"
"sort"
"github.com/aws/eks-anywhere/pkg/types"
)
type ImageDiskLoader interface {
LoadFromFile(ctx context.Context, filepath string) error
}
type ImageDiskWriter interface {
SaveToFile(ctx context.Context, filepath string, images ...string) error
}
type ImageTaggerPusher interface {
PushImage(ctx context.Context, image string, endpoint string) error
TagImage(ctx context.Context, image string, endpoint string) error
}
type ImagePuller interface {
PullImage(ctx context.Context, image string) error
}
type DockerClient interface {
ImageDiskLoader
ImageDiskWriter
ImagePuller
}
// ImageSource represents a generic source for container images that can be loaded
// into the local docker cache.
type ImageSource interface {
Load(ctx context.Context, images ...string) error
}
// ImageDestination represents a generic destination for container images that
// can be written from the local docker cache.
type ImageDestination interface {
Write(ctx context.Context, images ...string) error
}
// ImageMover orchestrates loading images from a source and writing them to a destination.
type ImageMover struct {
source ImageSource
destination ImageDestination
}
func NewImageMover(source ImageSource, destination ImageDestination) *ImageMover {
return &ImageMover{
source: source,
destination: destination,
}
}
// Move loads images from source and writes them to the destination.
func (m *ImageMover) Move(ctx context.Context, images ...string) error {
uniqueImages := removeDuplicates(images)
if err := m.source.Load(ctx, uniqueImages...); err != nil {
return fmt.Errorf("loading docker image mover source: %v", err)
}
if err := m.destination.Write(ctx, uniqueImages...); err != nil {
return fmt.Errorf("writing images to destination with image mover: %v", err)
}
return nil
}
func removeDuplicates(images []string) []string {
i := types.SliceToLookup(images).ToSlice()
sort.Strings(i)
return i
}
| 79 |
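A hypothetical composition of the pieces above: a disk source feeding a registry destination through the mover, which also deduplicates the image list before loading and writing. `NewRegistryDestination` is defined in the registry file of this package, shown further below.

```go
package docker_test

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/docker"
)

// moveFromDiskToRegistry loads images from a tarball and pushes them to an
// external registry endpoint via the mover.
func moveFromDiskToRegistry(
	ctx context.Context,
	loader docker.ImageDiskLoader,
	pusher docker.ImageTaggerPusher,
	tarball string,
	registry string,
	images []string,
) error {
	source := docker.NewDiskSource(loader, tarball)
	destination := docker.NewRegistryDestination(pusher, registry)
	mover := docker.NewImageMover(source, destination)
	return mover.Move(ctx, images...)
}
```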
eks-anywhere | aws | Go | package docker_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/docker"
"github.com/aws/eks-anywhere/pkg/docker/mocks"
)
type moverTest struct {
*WithT
ctx context.Context
src *mocks.MockImageSource
dst *mocks.MockImageDestination
images []string
}
func newMoverTest(t *testing.T) *moverTest {
ctrl := gomock.NewController(t)
return &moverTest{
WithT: NewWithT(t),
ctx: context.Background(),
src: mocks.NewMockImageSource(ctrl),
dst: mocks.NewMockImageDestination(ctrl),
images: []string{"image1:1", "image2:2", "image1:1", "image2:2"},
}
}
func TestImageMoverMove(t *testing.T) {
tt := newMoverTest(t)
tt.src.EXPECT().Load(tt.ctx, tt.images[0], tt.images[1])
tt.dst.EXPECT().Write(tt.ctx, tt.images[0], tt.images[1])
m := docker.NewImageMover(tt.src, tt.dst)
tt.Expect(m.Move(tt.ctx, tt.images...)).To(Succeed())
}
func TestImageMoverMoveErrorSource(t *testing.T) {
tt := newMoverTest(t)
errorMsg := "fake error"
tt.src.EXPECT().Load(tt.ctx, tt.images[0], tt.images[1]).Return(errors.New(errorMsg))
m := docker.NewImageMover(tt.src, tt.dst)
tt.Expect(m.Move(tt.ctx, tt.images...)).To(MatchError("loading docker image mover source: fake error"))
}
func TestImageMoverMoveErrorDestination(t *testing.T) {
tt := newMoverTest(t)
errorMsg := "fake error"
tt.src.EXPECT().Load(tt.ctx, tt.images[0], tt.images[1])
tt.dst.EXPECT().Write(tt.ctx, tt.images[0], tt.images[1]).Return(errors.New(errorMsg))
m := docker.NewImageMover(tt.src, tt.dst)
tt.Expect(m.Move(tt.ctx, tt.images...)).To(MatchError("writing images to destination with image mover: fake error"))
}
| 64 |
eks-anywhere | aws | Go | package docker
import (
"context"
"fmt"
"runtime"
"strings"
"github.com/aws/eks-anywhere/pkg/logger"
)
// These constants are temporary workarounds for a Harbor limitation:
// Harbor requires root-level projects, but the curated packages private
// account does not currently support root-level projects.
const (
packageProdDomain = "783794618700.dkr.ecr.us-west-2.amazonaws.com"
packageDevDomain = "857151390494.dkr.ecr.us-west-2.amazonaws.com"
publicProdECRName = "eks-anywhere"
publicDevECRName = "l0g8r8j6"
)
// ImageRegistryDestination implements the ImageDestination interface, writing images and tags
// from the local docker cache to an external registry.
type ImageRegistryDestination struct {
client ImageTaggerPusher
endpoint string
processor *ConcurrentImageProcessor
}
func NewRegistryDestination(client ImageTaggerPusher, registryEndpoint string) *ImageRegistryDestination {
return &ImageRegistryDestination{
client: client,
endpoint: registryEndpoint,
processor: NewConcurrentImageProcessor(runtime.GOMAXPROCS(0)),
}
}
// Write pushes images and tags from the local docker cache to an external registry.
func (d *ImageRegistryDestination) Write(ctx context.Context, images ...string) error {
logger.Info("Writing images to registry")
logger.V(3).Info("Starting registry write", "numberOfImages", len(images))
err := d.processor.Process(ctx, images, func(ctx context.Context, image string) error {
endpoint := getUpdatedEndpoint(d.endpoint, image)
image = removeDigestReference(image)
if err := d.client.TagImage(ctx, image, endpoint); err != nil {
return err
}
if err := d.client.PushImage(ctx, image, endpoint); err != nil {
return err
}
return nil
})
return err
}
// ImageOriginalRegistrySource implements the ImageSource interface, pulling images and tags from
// their original registry into the local docker cache.
type ImageOriginalRegistrySource struct {
client ImagePuller
processor *ConcurrentImageProcessor
}
func NewOriginalRegistrySource(client ImagePuller) *ImageOriginalRegistrySource {
return &ImageOriginalRegistrySource{
client: client,
processor: NewConcurrentImageProcessor(runtime.GOMAXPROCS(0)),
}
}
// Load pulls images and tags from their original registry into the local docker cache.
func (s *ImageOriginalRegistrySource) Load(ctx context.Context, images ...string) error {
logger.Info("Pulling images from origin, this might take a while")
logger.V(3).Info("Starting pull", "numberOfImages", len(images))
err := s.processor.Process(ctx, images, func(ctx context.Context, image string) error {
if err := s.client.PullImage(ctx, image); err != nil {
return err
}
return nil
})
return err
}
// Currently private curated packages don't have a root-level project.
// This method adds a root-level project name to the endpoint.
func getUpdatedEndpoint(originalEndpoint, image string) string {
if strings.Contains(image, packageDevDomain) {
return originalEndpoint + "/" + publicDevECRName
}
if strings.Contains(image, packageProdDomain) {
return originalEndpoint + "/" + publicProdECRName
}
return originalEndpoint
}
// Curated packages are currently referenced by digest.
// Docker doesn't support tagging images with a digest reference,
// so this method converts an image@algorithm:digest reference into an image:digest tag.
func removeDigestReference(image string) string {
imageSplit := strings.Split(image, "@")
if len(imageSplit) < 2 {
return image
}
imageLocation, digest := imageSplit[0], imageSplit[1]
digestSplit := strings.Split(digest, ":")
return fmt.Sprintf("%s:%s", imageLocation, digestSplit[1])
}
| 119 |
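To make the digest-stripping rule concrete, here is a standalone restatement of the same transformation (not the package's own exported API): a digest reference becomes a taggable reference that reuses the digest hex as the tag, while plain tag references pass through unchanged.

```go
package main

import (
	"fmt"
	"strings"
)

// digestToTag mirrors removeDigestReference above: split on "@", then reuse
// the digest value after "algorithm:" as the image tag.
func digestToTag(image string) string {
	parts := strings.Split(image, "@")
	if len(parts) < 2 {
		return image
	}
	digestParts := strings.Split(parts[1], ":")
	return fmt.Sprintf("%s:%s", parts[0], digestParts[1])
}

func main() {
	fmt.Println(digestToTag("repo/image@sha256:abc123")) // repo/image:abc123
	fmt.Println(digestToTag("repo/image:v1"))            // repo/image:v1 (unchanged)
}
```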
eks-anywhere | aws | Go | package docker_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/docker"
"github.com/aws/eks-anywhere/pkg/docker/mocks"
)
func TestNewRegistryDestination(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockImageTaggerPusher(ctrl)
registry := "https://registry"
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
dstLoader := docker.NewRegistryDestination(client, registry)
for _, i := range images {
client.EXPECT().TagImage(test.AContext(), i, registry)
client.EXPECT().PushImage(test.AContext(), i, registry)
}
g.Expect(dstLoader.Write(ctx, images...)).To(Succeed())
}
func TestNewRegistryDestinationWhenDigestSpecified(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockImageTaggerPusher(ctrl)
registry := "https://registry"
image := "image1@sha256:v1"
expectedImage := "image1:v1"
ctx := context.Background()
dstLoader := docker.NewRegistryDestination(client, registry)
client.EXPECT().TagImage(test.AContext(), expectedImage, registry)
client.EXPECT().PushImage(test.AContext(), expectedImage, registry)
g.Expect(dstLoader.Write(ctx, image)).To(Succeed())
}
func TestNewRegistryDestinationWhenPackagesDevProvided(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockImageTaggerPusher(ctrl)
registry := "https://registry"
expectedRegistry := "https://registry/l0g8r8j6"
image := "857151390494.dkr.ecr.us-west-2.amazonaws.com:v1"
ctx := context.Background()
dstLoader := docker.NewRegistryDestination(client, registry)
client.EXPECT().TagImage(test.AContext(), image, expectedRegistry)
client.EXPECT().PushImage(test.AContext(), image, expectedRegistry)
g.Expect(dstLoader.Write(ctx, image)).To(Succeed())
}
func TestNewRegistryDestinationWhenPackagesProdProvided(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockImageTaggerPusher(ctrl)
registry := "https://registry"
expectedRegistry := "https://registry/eks-anywhere"
image := "783794618700.dkr.ecr.us-west-2.amazonaws.com:v1"
ctx := context.Background()
dstLoader := docker.NewRegistryDestination(client, registry)
client.EXPECT().TagImage(test.AContext(), image, expectedRegistry)
client.EXPECT().PushImage(test.AContext(), image, expectedRegistry)
g.Expect(dstLoader.Write(ctx, image)).To(Succeed())
}
func TestNewRegistryDestinationErrorTag(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockImageTaggerPusher(ctrl)
registry := "https://registry"
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
dstLoader := docker.NewRegistryDestination(client, registry)
client.EXPECT().TagImage(test.AContext(), images[0], registry).Return(errors.New("error tagging"))
client.EXPECT().TagImage(test.AContext(), images[1], registry).MaxTimes(1)
client.EXPECT().PushImage(test.AContext(), images[1], registry).MaxTimes(1)
g.Expect(dstLoader.Write(ctx, images...)).To(MatchError(ContainSubstring("error tagging")))
}
func TestNewRegistryDestinationErrorPush(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockImageTaggerPusher(ctrl)
registry := "https://registry"
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
dstLoader := docker.NewRegistryDestination(client, registry)
client.EXPECT().TagImage(test.AContext(), images[0], registry)
client.EXPECT().PushImage(test.AContext(), images[0], registry).Return(errors.New("error pushing"))
client.EXPECT().TagImage(test.AContext(), images[1], registry).MaxTimes(1)
client.EXPECT().PushImage(test.AContext(), images[1], registry).MaxTimes(1)
g.Expect(dstLoader.Write(ctx, images...)).To(MatchError(ContainSubstring("error pushing")))
}
func TestNewOriginalRegistrySource(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockDockerClient(ctrl)
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
dstLoader := docker.NewOriginalRegistrySource(client)
for _, i := range images {
client.EXPECT().PullImage(test.AContext(), i)
}
g.Expect(dstLoader.Load(ctx, images...)).To(Succeed())
}
func TestOriginalRegistrySourceError(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
client := mocks.NewMockDockerClient(ctrl)
images := []string{"image1:1", "image2:2"}
ctx := context.Background()
dstLoader := docker.NewOriginalRegistrySource(client)
client.EXPECT().PullImage(test.AContext(), images[0]).Return(errors.New("error pulling"))
client.EXPECT().PullImage(test.AContext(), images[1]).MaxTimes(1)
g.Expect(dstLoader.Load(ctx, images...)).To(MatchError(ContainSubstring("error pulling")))
}
| 142 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/docker/mover.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockImageDiskLoader is a mock of ImageDiskLoader interface.
type MockImageDiskLoader struct {
ctrl *gomock.Controller
recorder *MockImageDiskLoaderMockRecorder
}
// MockImageDiskLoaderMockRecorder is the mock recorder for MockImageDiskLoader.
type MockImageDiskLoaderMockRecorder struct {
mock *MockImageDiskLoader
}
// NewMockImageDiskLoader creates a new mock instance.
func NewMockImageDiskLoader(ctrl *gomock.Controller) *MockImageDiskLoader {
mock := &MockImageDiskLoader{ctrl: ctrl}
mock.recorder = &MockImageDiskLoaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageDiskLoader) EXPECT() *MockImageDiskLoaderMockRecorder {
return m.recorder
}
// LoadFromFile mocks base method.
func (m *MockImageDiskLoader) LoadFromFile(ctx context.Context, filepath string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LoadFromFile", ctx, filepath)
ret0, _ := ret[0].(error)
return ret0
}
// LoadFromFile indicates an expected call of LoadFromFile.
func (mr *MockImageDiskLoaderMockRecorder) LoadFromFile(ctx, filepath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadFromFile", reflect.TypeOf((*MockImageDiskLoader)(nil).LoadFromFile), ctx, filepath)
}
// MockImageDiskWriter is a mock of ImageDiskWriter interface.
type MockImageDiskWriter struct {
ctrl *gomock.Controller
recorder *MockImageDiskWriterMockRecorder
}
// MockImageDiskWriterMockRecorder is the mock recorder for MockImageDiskWriter.
type MockImageDiskWriterMockRecorder struct {
mock *MockImageDiskWriter
}
// NewMockImageDiskWriter creates a new mock instance.
func NewMockImageDiskWriter(ctrl *gomock.Controller) *MockImageDiskWriter {
mock := &MockImageDiskWriter{ctrl: ctrl}
mock.recorder = &MockImageDiskWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageDiskWriter) EXPECT() *MockImageDiskWriterMockRecorder {
return m.recorder
}
// SaveToFile mocks base method.
func (m *MockImageDiskWriter) SaveToFile(ctx context.Context, filepath string, images ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, filepath}
for _, a := range images {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SaveToFile", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// SaveToFile indicates an expected call of SaveToFile.
func (mr *MockImageDiskWriterMockRecorder) SaveToFile(ctx, filepath interface{}, images ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, filepath}, images...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveToFile", reflect.TypeOf((*MockImageDiskWriter)(nil).SaveToFile), varargs...)
}
// MockImageTaggerPusher is a mock of ImageTaggerPusher interface.
type MockImageTaggerPusher struct {
ctrl *gomock.Controller
recorder *MockImageTaggerPusherMockRecorder
}
// MockImageTaggerPusherMockRecorder is the mock recorder for MockImageTaggerPusher.
type MockImageTaggerPusherMockRecorder struct {
mock *MockImageTaggerPusher
}
// NewMockImageTaggerPusher creates a new mock instance.
func NewMockImageTaggerPusher(ctrl *gomock.Controller) *MockImageTaggerPusher {
mock := &MockImageTaggerPusher{ctrl: ctrl}
mock.recorder = &MockImageTaggerPusherMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageTaggerPusher) EXPECT() *MockImageTaggerPusherMockRecorder {
return m.recorder
}
// PushImage mocks base method.
func (m *MockImageTaggerPusher) PushImage(ctx context.Context, image, endpoint string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushImage", ctx, image, endpoint)
ret0, _ := ret[0].(error)
return ret0
}
// PushImage indicates an expected call of PushImage.
func (mr *MockImageTaggerPusherMockRecorder) PushImage(ctx, image, endpoint interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushImage", reflect.TypeOf((*MockImageTaggerPusher)(nil).PushImage), ctx, image, endpoint)
}
// TagImage mocks base method.
func (m *MockImageTaggerPusher) TagImage(ctx context.Context, image, endpoint string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TagImage", ctx, image, endpoint)
ret0, _ := ret[0].(error)
return ret0
}
// TagImage indicates an expected call of TagImage.
func (mr *MockImageTaggerPusherMockRecorder) TagImage(ctx, image, endpoint interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagImage", reflect.TypeOf((*MockImageTaggerPusher)(nil).TagImage), ctx, image, endpoint)
}
// MockImagePuller is a mock of ImagePuller interface.
type MockImagePuller struct {
ctrl *gomock.Controller
recorder *MockImagePullerMockRecorder
}
// MockImagePullerMockRecorder is the mock recorder for MockImagePuller.
type MockImagePullerMockRecorder struct {
mock *MockImagePuller
}
// NewMockImagePuller creates a new mock instance.
func NewMockImagePuller(ctrl *gomock.Controller) *MockImagePuller {
mock := &MockImagePuller{ctrl: ctrl}
mock.recorder = &MockImagePullerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImagePuller) EXPECT() *MockImagePullerMockRecorder {
return m.recorder
}
// PullImage mocks base method.
func (m *MockImagePuller) PullImage(ctx context.Context, image string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullImage", ctx, image)
ret0, _ := ret[0].(error)
return ret0
}
// PullImage indicates an expected call of PullImage.
func (mr *MockImagePullerMockRecorder) PullImage(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImagePuller)(nil).PullImage), ctx, image)
}
// MockDockerClient is a mock of DockerClient interface.
type MockDockerClient struct {
ctrl *gomock.Controller
recorder *MockDockerClientMockRecorder
}
// MockDockerClientMockRecorder is the mock recorder for MockDockerClient.
type MockDockerClientMockRecorder struct {
mock *MockDockerClient
}
// NewMockDockerClient creates a new mock instance.
func NewMockDockerClient(ctrl *gomock.Controller) *MockDockerClient {
mock := &MockDockerClient{ctrl: ctrl}
mock.recorder = &MockDockerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDockerClient) EXPECT() *MockDockerClientMockRecorder {
return m.recorder
}
// LoadFromFile mocks base method.
func (m *MockDockerClient) LoadFromFile(ctx context.Context, filepath string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LoadFromFile", ctx, filepath)
ret0, _ := ret[0].(error)
return ret0
}
// LoadFromFile indicates an expected call of LoadFromFile.
func (mr *MockDockerClientMockRecorder) LoadFromFile(ctx, filepath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadFromFile", reflect.TypeOf((*MockDockerClient)(nil).LoadFromFile), ctx, filepath)
}
// PullImage mocks base method.
func (m *MockDockerClient) PullImage(ctx context.Context, image string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullImage", ctx, image)
ret0, _ := ret[0].(error)
return ret0
}
// PullImage indicates an expected call of PullImage.
func (mr *MockDockerClientMockRecorder) PullImage(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockDockerClient)(nil).PullImage), ctx, image)
}
// SaveToFile mocks base method.
func (m *MockDockerClient) SaveToFile(ctx context.Context, filepath string, images ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, filepath}
for _, a := range images {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SaveToFile", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// SaveToFile indicates an expected call of SaveToFile.
func (mr *MockDockerClientMockRecorder) SaveToFile(ctx, filepath interface{}, images ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, filepath}, images...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveToFile", reflect.TypeOf((*MockDockerClient)(nil).SaveToFile), varargs...)
}
// MockImageSource is a mock of ImageSource interface.
type MockImageSource struct {
ctrl *gomock.Controller
recorder *MockImageSourceMockRecorder
}
// MockImageSourceMockRecorder is the mock recorder for MockImageSource.
type MockImageSourceMockRecorder struct {
mock *MockImageSource
}
// NewMockImageSource creates a new mock instance.
func NewMockImageSource(ctrl *gomock.Controller) *MockImageSource {
mock := &MockImageSource{ctrl: ctrl}
mock.recorder = &MockImageSourceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageSource) EXPECT() *MockImageSourceMockRecorder {
return m.recorder
}
// Load mocks base method.
func (m *MockImageSource) Load(ctx context.Context, images ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range images {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Load", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Load indicates an expected call of Load.
func (mr *MockImageSourceMockRecorder) Load(ctx interface{}, images ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, images...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockImageSource)(nil).Load), varargs...)
}
// MockImageDestination is a mock of ImageDestination interface.
type MockImageDestination struct {
ctrl *gomock.Controller
recorder *MockImageDestinationMockRecorder
}
// MockImageDestinationMockRecorder is the mock recorder for MockImageDestination.
type MockImageDestinationMockRecorder struct {
mock *MockImageDestination
}
// NewMockImageDestination creates a new mock instance.
func NewMockImageDestination(ctrl *gomock.Controller) *MockImageDestination {
mock := &MockImageDestination{ctrl: ctrl}
mock.recorder = &MockImageDestinationMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageDestination) EXPECT() *MockImageDestinationMockRecorder {
return m.recorder
}
// Write mocks base method.
func (m *MockImageDestination) Write(ctx context.Context, images ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range images {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Write", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Write indicates an expected call of Write.
func (mr *MockImageDestinationMockRecorder) Write(ctx interface{}, images ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, images...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockImageDestination)(nil).Write), varargs...)
}
| 334 |
eks-anywhere | aws | Go | package eksctl
import (
"fmt"
"os"
)
const VersionEnvVar = "EKSCTL_VERSION"
var enabled string
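// Enabled reports whether this binary was built to run as the eksctl
// anywhere plugin. The package-level enabled value is presumably injected
// at build time via -ldflags (an assumption; nothing in this package sets it).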
func Enabled() bool {
return enabled == "true"
}
func ValidateVersion() error {
if os.Getenv(VersionEnvVar) == "" {
return fmt.Errorf("unable to retrieve version. Please use the 'eksctl anywhere' command to use EKS-A")
}
return nil
}
| 22 |
eks-anywhere | aws | Go | package eksctl_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/pkg/eksctl"
)
func TestValidateVersionSuccess(t *testing.T) {
t.Setenv(eksctl.VersionEnvVar, "dev")
err := eksctl.ValidateVersion()
if err != nil {
t.Fatalf("ValidateVersion() error = %v, wantErr <nil>", err)
}
}
func TestValidateVersionError(t *testing.T) {
os.Unsetenv(eksctl.VersionEnvVar)
expected := "unable to retrieve version. Please use the 'eksctl anywhere' command to use EKS-A"
err := eksctl.ValidateVersion()
if err == nil {
t.Fatalf("ValidateVersion() error = <nil>, want error = %v", expected)
}
actual := err.Error()
if expected != actual {
t.Fatalf("Expected=<%s> actual=<%s>", expected, actual)
}
}
| 30 |
eks-anywhere | aws | Go | package eksd
import (
"context"
"fmt"
"time"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
maxRetries = 5
backOffPeriod = 5 * time.Second
)
type EksdInstallerClient interface {
ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error
}
type Reader interface {
ReadFile(url string) ([]byte, error)
}
// InstallerOpt allows customizing an eksd installer
// on construction.
type InstallerOpt func(*Installer)
type Installer struct {
client EksdInstallerClient
retrier *retrier.Retrier
reader Reader
}
// NewEksdInstaller constructs a new eks-d installer.
func NewEksdInstaller(client EksdInstallerClient, reader Reader, opts ...InstallerOpt) *Installer {
i := &Installer{
client: client,
retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
reader: reader,
}
for _, opt := range opts {
opt(i)
}
return i
}
// WithRetrier allows using a custom retrier.
func WithRetrier(retrier *retrier.Retrier) InstallerOpt {
return func(i *Installer) {
i.retrier = retrier
}
}
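// newInstallerWithFastRetries is a hypothetical helper (added for
// illustration; it is not part of the original source) showing how
// InstallerOpt values compose on construction to override the default
// retry policy defined by maxRetries and backOffPeriod above.
func newInstallerWithFastRetries(client EksdInstallerClient, reader Reader) *Installer {
	// Retry up to 10 times with a 2-second backoff instead of the defaults.
	return NewEksdInstaller(client, reader, WithRetrier(retrier.NewWithMaxRetries(10, 2*time.Second)))
}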
func (i *Installer) InstallEksdCRDs(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error {
var eksdCRDs []byte
eksdCrds := map[string]struct{}{}
for _, vb := range clusterSpec.Bundles.Spec.VersionsBundles {
eksdCrds[vb.EksD.Components] = struct{}{}
}
for crd := range eksdCrds {
if err := i.retrier.Retry(
func() error {
var readerErr error
eksdCRDs, readerErr = i.reader.ReadFile(crd)
return readerErr
},
); err != nil {
return fmt.Errorf("loading manifest for eksd components: %v", err)
}
if err := i.retrier.Retry(
func() error {
return i.client.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, eksdCRDs, constants.EksaSystemNamespace)
},
); err != nil {
return fmt.Errorf("applying eksd release crd: %v", err)
}
}
return nil
}
// SetRetrier allows modifying the internal retrier.
// For unit testing purposes only. It is not thread safe.
func (i *Installer) SetRetrier(retrier *retrier.Retrier) {
i.retrier = retrier
}
func (i *Installer) InstallEksdManifest(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error {
var eksdReleaseManifest []byte
for _, vb := range clusterSpec.Bundles.Spec.VersionsBundles {
if err := i.retrier.Retry(
func() error {
var readerErr error
eksdReleaseManifest, readerErr = i.reader.ReadFile(vb.EksD.EksDReleaseUrl)
return readerErr
},
); err != nil {
return fmt.Errorf("loading manifest for eksd components: %v", err)
}
logger.V(4).Info("Applying eksd manifest to cluster")
if err := i.retrier.Retry(
func() error {
return i.client.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, eksdReleaseManifest, constants.EksaSystemNamespace)
},
); err != nil {
return fmt.Errorf("applying eksd release manifest: %v", err)
}
}
return nil
}
| 120 |
eks-anywhere | aws | Go | package eksd_test
import (
"context"
"errors"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
m "github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/eksd"
"github.com/aws/eks-anywhere/pkg/eksd/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
var testdataFile = "testdata/testdata.yaml"
type installerTest struct {
*WithT
ctx context.Context
client *mocks.MockEksdInstallerClient
reader *m.MockReader
clusterSpec *cluster.Spec
eksdInstaller *eksd.Installer
cluster *types.Cluster
}
func newInstallerTest(t *testing.T) *installerTest {
ctrl := gomock.NewController(t)
client := mocks.NewMockEksdInstallerClient(ctrl)
reader := m.NewMockReader(ctrl)
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.EksD.Name = "eks-d-1"
})
return &installerTest{
WithT: NewWithT(t),
ctx: context.Background(),
client: client,
reader: reader,
eksdInstaller: eksd.NewEksdInstaller(client, reader),
clusterSpec: clusterSpec,
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "k.kubeconfig",
},
}
}
func TestInstallEksdCRDsSuccess(t *testing.T) {
tt := newInstallerTest(t)
tt.clusterSpec.Bundles = bundle()
tt.reader.EXPECT().ReadFile(testdataFile).Return([]byte("test data"), nil).Times(1)
tt.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, tt.cluster, []byte("test data"), constants.EksaSystemNamespace).Return(nil)
if err := tt.eksdInstaller.InstallEksdCRDs(tt.ctx, tt.clusterSpec, tt.cluster); err != nil {
t.Errorf("Eksd.InstallEksdCRDs() error = %v, wantErr nil", err)
}
}
func TestInstallEksdManifestSuccess(t *testing.T) {
tt := newInstallerTest(t)
tt.eksdInstaller = eksd.NewEksdInstaller(tt.client, tt.reader, eksd.WithRetrier(retrier.NewWithMaxRetries(3, 0)))
tt.clusterSpec.Bundles = bundle()
tt.reader.EXPECT().ReadFile(testdataFile).Return([]byte("test data"), nil).Times(2)
tt.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, tt.cluster, []byte("test data"), constants.EksaSystemNamespace).Return(errors.New("error apply")).Times(2)
tt.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, tt.cluster, []byte("test data"), constants.EksaSystemNamespace).Return(nil).Times(2)
if err := tt.eksdInstaller.InstallEksdManifest(tt.ctx, tt.clusterSpec, tt.cluster); err != nil {
t.Errorf("Eksd.InstallEksdManifest() error = %v, wantErr nil", err)
}
}
func TestInstallEksdManifestErrorReadingManifest(t *testing.T) {
tt := newInstallerTest(t)
tt.eksdInstaller.SetRetrier(retrier.NewWithMaxRetries(1, 0))
tt.clusterSpec.Bundles = bundle()
tt.clusterSpec.Bundles.Spec.VersionsBundles[0].EksD.EksDReleaseUrl = "fake.yaml"
tt.reader.EXPECT().ReadFile(tt.clusterSpec.Bundles.Spec.VersionsBundles[0].EksD.EksDReleaseUrl).Return([]byte(""), fmt.Errorf("error"))
if err := tt.eksdInstaller.InstallEksdManifest(tt.ctx, tt.clusterSpec, tt.cluster); err == nil {
t.Error("Eksd.InstallEksdManifest() error = nil, wantErr not nil")
}
}
func bundle() *v1alpha1.Bundles {
return &v1alpha1.Bundles{
Spec: v1alpha1.BundlesSpec{
VersionsBundles: []v1alpha1.VersionsBundle{
{
EksD: v1alpha1.EksDRelease{
Components: testdataFile,
EksDReleaseUrl: testdataFile,
},
},
{
EksD: v1alpha1.EksDRelease{
Components: testdataFile,
EksDReleaseUrl: testdataFile,
},
},
},
},
}
}
| 113 |
eks-anywhere | aws | Go | package eksd
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
)
type Upgrader struct {
*Installer
}
// UpgraderOpt allows customizing an eksd upgrader on construction.
type UpgraderOpt = InstallerOpt
// NewUpgrader constructs a new eks-d upgrader.
func NewUpgrader(client EksdInstallerClient, reader Reader, opts ...UpgraderOpt) *Upgrader {
return &Upgrader{
NewEksdInstaller(client, reader, opts...),
}
}
func (u *Upgrader) Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error) {
logger.V(1).Info("Checking for EKS-D components upgrade")
changeDiff := EksdChangeDiff(currentSpec, newSpec)
if changeDiff == nil {
logger.V(1).Info("Nothing to upgrade for EKS-D components")
return nil, nil
}
logger.V(1).Info("Starting EKS-D components upgrade")
if err := u.InstallEksdCRDs(ctx, newSpec, cluster); err != nil {
return nil, fmt.Errorf("upgrading EKS-D components from version %s to version %s: %v", changeDiff.ComponentReports[0].OldVersion, changeDiff.ComponentReports[0].NewVersion, err)
}
return changeDiff, nil
}
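// EksdChangeDiff computes the EKS-D component version change between the
// current and new cluster specs, returning nil when the versions match.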
func EksdChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ChangeDiff {
if currentSpec.VersionsBundle.EksD.Name != newSpec.VersionsBundle.EksD.Name {
logger.V(1).Info("EKS-D change diff ", "oldVersion ", currentSpec.VersionsBundle.EksD.Name, "newVersion ", newSpec.VersionsBundle.EksD.Name)
return &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "EKS-D",
NewVersion: newSpec.VersionsBundle.EksD.Name,
OldVersion: currentSpec.VersionsBundle.EksD.Name,
},
},
}
}
return nil
}
| 55 |
eks-anywhere | aws | Go | package eksd_test
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
m "github.com/aws/eks-anywhere/internal/test/mocks"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/eksd"
"github.com/aws/eks-anywhere/pkg/eksd/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
type upgraderTest struct {
*WithT
ctx context.Context
client *mocks.MockEksdInstallerClient
reader *m.MockReader
currentSpec *cluster.Spec
newSpec *cluster.Spec
eksdUpgrader *eksd.Upgrader
cluster *types.Cluster
}
func newUpgraderTest(t *testing.T) *upgraderTest {
ctrl := gomock.NewController(t)
client := mocks.NewMockEksdInstallerClient(ctrl)
reader := m.NewMockReader(ctrl)
currentSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.EksD.Name = "eks-d-1"
})
return &upgraderTest{
WithT: NewWithT(t),
ctx: context.Background(),
client: client,
reader: reader,
eksdUpgrader: eksd.NewUpgrader(client, reader),
currentSpec: currentSpec,
newSpec: currentSpec.DeepCopy(),
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "k.kubeconfig",
},
}
}
func TestEksdUpgradeNoSelfManaged(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.Cluster.SetManagedBy("management-cluster")
tt.Expect(tt.eksdUpgrader.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestEksdUpgradeNoChanges(t *testing.T) {
tt := newUpgraderTest(t)
tt.Expect(tt.eksdUpgrader.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestEksdUpgradeSuccess(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.EksD.Name = "eks-d-2"
tt.newSpec.Bundles = bundle()
wantDiff := &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "EKS-D",
NewVersion: "eks-d-2",
OldVersion: "eks-d-1",
},
},
}
tt.reader.EXPECT().ReadFile(testdataFile).Return([]byte("test data"), nil)
tt.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, tt.cluster, []byte("test data"), constants.EksaSystemNamespace).Return(nil)
tt.Expect(tt.eksdUpgrader.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestUpgraderEksdUpgradeInstallError(t *testing.T) {
tt := newUpgraderTest(t)
tt.eksdUpgrader.SetRetrier(retrier.NewWithMaxRetries(1, 0))
tt.newSpec.VersionsBundle.EksD.Name = "eks-d-2"
tt.newSpec.Bundles = bundle()
tt.newSpec.Bundles.Spec.VersionsBundles[0].EksD.Components = ""
tt.newSpec.Bundles.Spec.VersionsBundles[1].EksD.Components = ""
tt.reader.EXPECT().ReadFile(tt.newSpec.Bundles.Spec.VersionsBundles[0].EksD.Components).Return([]byte(""), fmt.Errorf("error"))
// components file not set, so this should return an error when failing to load the manifest
_, err := tt.eksdUpgrader.Upgrade(tt.ctx, tt.cluster, tt.currentSpec, tt.newSpec)
tt.Expect(err).NotTo(BeNil())
}
| 102 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/eksd (interfaces: EksdInstallerClient)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
)
// MockEksdInstallerClient is a mock of EksdInstallerClient interface.
type MockEksdInstallerClient struct {
ctrl *gomock.Controller
recorder *MockEksdInstallerClientMockRecorder
}
// MockEksdInstallerClientMockRecorder is the mock recorder for MockEksdInstallerClient.
type MockEksdInstallerClientMockRecorder struct {
mock *MockEksdInstallerClient
}
// NewMockEksdInstallerClient creates a new mock instance.
func NewMockEksdInstallerClient(ctrl *gomock.Controller) *MockEksdInstallerClient {
mock := &MockEksdInstallerClient{ctrl: ctrl}
mock.recorder = &MockEksdInstallerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockEksdInstallerClient) EXPECT() *MockEksdInstallerClientMockRecorder {
return m.recorder
}
// ApplyKubeSpecFromBytesWithNamespace mocks base method.
func (m *MockEksdInstallerClient) ApplyKubeSpecFromBytesWithNamespace(arg0 context.Context, arg1 *types.Cluster, arg2 []byte, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesWithNamespace", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesWithNamespace indicates an expected call of ApplyKubeSpecFromBytesWithNamespace.
func (mr *MockEksdInstallerClientMockRecorder) ApplyKubeSpecFromBytesWithNamespace(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesWithNamespace", reflect.TypeOf((*MockEksdInstallerClient)(nil).ApplyKubeSpecFromBytesWithNamespace), arg0, arg1, arg2, arg3)
}
| 51 |
eks-anywhere | aws | Go | package errors
import "k8s.io/apimachinery/pkg/util/errors"
// Aggregate represents an object that contains multiple errors, but does not necessarily have singular semantic meaning.
// The aggregate can be used with `errors.Is()` to check for the occurrence of a specific error type.
// errors.As() is not supported, because the caller presumably cares about a specific error, and potentially multiple errors could match the given type.
type Aggregate errors.Aggregate
// NewAggregate converts a slice of errors into an Aggregate interface, which
// is itself an implementation of the error interface. If the slice is empty,
// this returns nil.
// It checks whether any element of the input error list is nil, to avoid a
// nil pointer panic when Error() is called.
func NewAggregate(errList []error) Aggregate {
return errors.NewAggregate(errList)
}
// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary
// nesting, and flattens them all into a single Aggregate, recursively.
func Flatten(agg Aggregate) Aggregate {
return errors.Flatten(agg)
}
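// Usage sketch (illustrative; not part of the original source): callers
// typically collect errors from a loop, aggregate them, and match sentinel
// errors with the standard library's errors.Is. items and process are
// hypothetical stand-ins for caller code.
//
//	var errs []error
//	for _, item := range items {
//		if err := process(item); err != nil {
//			errs = append(errs, err)
//		}
//	}
//	if agg := NewAggregate(errs); agg != nil {
//		fmt.Println(Flatten(agg)) // one flattened aggregate, usable with errors.Is
//	}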
| 24 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
)
const awsCliPath = "aws"
type AwsCli struct {
Executable
}
func NewAwsCli(executable Executable) *AwsCli {
return &AwsCli{
Executable: executable,
}
}
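// CreateAccessKey creates an IAM access key for the given user and returns
// the AWS CLI's stdout (JSON by default, depending on the configured output format).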
func (ac *AwsCli) CreateAccessKey(ctx context.Context, username string) (string, error) {
stdOut, err := ac.Execute(ctx, "iam", "create-access-key", "--user-name", username)
if err != nil {
return "", fmt.Errorf("executing iam create-access-key: %v", err)
}
return stdOut.String(), nil
}
| 27 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
)
func TestCreateAccessKeySuccess(t *testing.T) {
var userName string
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "iam", "create-access-key", "--user-name", userName).Return(bytes.Buffer{}, nil)
c := executables.NewAwsCli(executable)
_, err := c.CreateAccessKey(ctx, userName)
if err != nil {
t.Fatalf("Awscli.CreateAccessKey() error = %v, want nil", err)
}
}
func TestCreateAccessKeyError(t *testing.T) {
var userName string
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "iam", "create-access-key", "--user-name", userName).Return(bytes.Buffer{}, errors.New("error from execute"))
c := executables.NewAwsCli(executable)
_, err := c.CreateAccessKey(ctx, userName)
if err == nil {
t.Fatalf("Awscli.CreateAccessKey() error = %v, want not nil", err)
}
}
| 42 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
"os"
"strings"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
const defaultEksaImage = "public.ecr.aws/l0g8r8j6/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-build.1864"
type ExecutableBuilder interface {
Init(ctx context.Context) (Closer, error)
Build(binaryPath string) Executable
}
type ExecutablesBuilder struct {
executableBuilder ExecutableBuilder
}
func NewExecutablesBuilder(executableBuilder ExecutableBuilder) *ExecutablesBuilder {
return &ExecutablesBuilder{
executableBuilder: executableBuilder,
}
}
func (b *ExecutablesBuilder) BuildKindExecutable(writer filewriter.FileWriter) *Kind {
return NewKind(b.executableBuilder.Build(kindPath), writer)
}
func (b *ExecutablesBuilder) BuildClusterAwsAdmExecutable() *Clusterawsadm {
return NewClusterawsadm(b.executableBuilder.Build(clusterAwsAdminPath))
}
// BuildClusterCtlExecutable builds a new Clusterctl executable.
func (b *ExecutablesBuilder) BuildClusterCtlExecutable(writer filewriter.FileWriter, reader manifests.FileReader) *Clusterctl {
return NewClusterctl(b.executableBuilder.Build(clusterCtlPath), writer, reader)
}
func (b *ExecutablesBuilder) BuildKubectlExecutable() *Kubectl {
return NewKubectl(b.executableBuilder.Build(kubectlPath))
}
func (b *ExecutablesBuilder) BuildGovcExecutable(writer filewriter.FileWriter, opts ...GovcOpt) *Govc {
return NewGovc(b.executableBuilder.Build(govcPath), writer, opts...)
}
// BuildCmkExecutable initializes a Cmk object and returns it.
func (b *ExecutablesBuilder) BuildCmkExecutable(writer filewriter.FileWriter, config *decoder.CloudStackExecConfig) (*Cmk, error) {
return NewCmk(b.executableBuilder.Build(cmkPath), writer, config)
}
func (b *ExecutablesBuilder) BuildAwsCli() *AwsCli {
return NewAwsCli(b.executableBuilder.Build(awsCliPath))
}
func (b *ExecutablesBuilder) BuildFluxExecutable() *Flux {
return NewFlux(b.executableBuilder.Build(fluxPath))
}
func (b *ExecutablesBuilder) BuildTroubleshootExecutable() *Troubleshoot {
return NewTroubleshoot(b.executableBuilder.Build(troubleshootPath))
}
func (b *ExecutablesBuilder) BuildHelmExecutable(opts ...HelmOpt) *Helm {
return NewHelm(b.executableBuilder.Build(helmPath), opts...)
}
// BuildDockerExecutable initializes a docker executable and returns it.
func (b *ExecutablesBuilder) BuildDockerExecutable() *Docker {
return NewDocker(b.executableBuilder.Build(dockerPath))
}
// BuildSSHExecutable initializes an SSH executable and returns it.
func (b *ExecutablesBuilder) BuildSSHExecutable() *SSH {
return NewSSH(b.executableBuilder.Build(sshPath))
}
// Init initializes the executable builder and returns a Closer
// that needs to be called once the executables are no longer in use.
// The closer will clean up and free all internal resources.
func (b *ExecutablesBuilder) Init(ctx context.Context) (Closer, error) {
return b.executableBuilder.Init(ctx)
}
func BuildSonobuoyExecutable() *Sonobuoy {
return NewSonobuoy(&executable{
cli: sonobuoyPath,
})
}
func BuildDockerExecutable() *Docker {
return NewDocker(&executable{
cli: dockerPath,
})
}
// ExecutablesInDocker determines whether binary executables should be run
// from a docker container or as native binaries from the host path.
// It reads the MR_TOOLS_DISABLE environment variable.
func ExecutablesInDocker() bool {
if env, ok := os.LookupEnv("MR_TOOLS_DISABLE"); ok && strings.EqualFold(env, "true") {
logger.Info("Warning: eks-a tools image disabled, using client's executables")
return false
}
return true
}
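// Illustrative note (not part of the original source): exporting the
// variable on the host makes the CLI fall back to binaries on the host path.
//
//	os.Setenv("MR_TOOLS_DISABLE", "true")
//	ExecutablesInDocker() // returns false and logs a warning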
// InitInDockerExecutablesBuilder builds and inits a default ExecutablesBuilder
// that runs executables in a long-running docker container.
func InitInDockerExecutablesBuilder(ctx context.Context, image string, mountDirs ...string) (*ExecutablesBuilder, Closer, error) {
b, err := NewInDockerExecutablesBuilder(BuildDockerExecutable(), image, mountDirs...)
if err != nil {
return nil, nil, err
}
closer, err := b.Init(ctx)
if err != nil {
return nil, nil, err
}
return b, closer, nil
}
// NewInDockerExecutablesBuilder builds an executables builder for docker.
func NewInDockerExecutablesBuilder(dockerClient DockerClient, image string, mountDirs ...string) (*ExecutablesBuilder, error) {
currentDir, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("getting current directory: %v", err)
}
mountDirs = append(mountDirs, currentDir)
dockerContainer := newDockerContainer(image, currentDir, mountDirs, dockerClient)
dockerExecutableBuilder := NewDockerExecutableBuilder(dockerContainer)
return NewExecutablesBuilder(dockerExecutableBuilder), nil
}
func NewLocalExecutablesBuilder() *ExecutablesBuilder {
return NewExecutablesBuilder(newLocalExecutableBuilder())
}
func DefaultEksaImage() string {
return defaultEksaImage
}
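// Closer frees the resources backing an ExecutablesBuilder, such as the
// long-running tools container used by the in-docker builder.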
type Closer func(ctx context.Context) error
// Close implements interface types.Closer.
func (c Closer) Close(ctx context.Context) error {
return c(ctx)
}
// CheckErr calls the closer and logs an error if present.
// It's mostly a helper for deferring the close in a one-liner without ignoring the error.
func (c Closer) CheckErr(ctx context.Context) {
if err := c(ctx); err != nil {
logger.Error(err, "Failed closing container for executables")
}
}
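// Caller sketch (illustrative; not part of the original source): Init returns
// a Closer that is commonly deferred through CheckErr so close failures are
// logged instead of silently dropped.
//
//	b, closer, err := InitInDockerExecutablesBuilder(ctx, DefaultEksaImage())
//	if err != nil {
//		return err
//	}
//	defer closer.CheckErr(ctx)
//	helm := b.BuildHelmExecutable()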
| 166 |
eks-anywhere | aws | Go | package executables_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
func TestLocalExecutablesBuilderAllExecutables(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := executables.NewLocalExecutablesBuilder()
closer, err := b.Init(ctx)
g.Expect(err).NotTo(HaveOccurred())
_, writer := test.NewWriter(t)
reader := files.NewReader()
kind := b.BuildKindExecutable(writer)
g.Expect(kind).NotTo(BeNil())
awsAdm := b.BuildClusterAwsAdmExecutable()
g.Expect(awsAdm).NotTo(BeNil())
clusterctl := b.BuildClusterCtlExecutable(writer, reader)
g.Expect(clusterctl).NotTo(BeNil())
kubectl := b.BuildKubectlExecutable()
g.Expect(kubectl).NotTo(BeNil())
govc := b.BuildGovcExecutable(writer)
g.Expect(govc).NotTo(BeNil())
cmk, err := b.BuildCmkExecutable(writer, &decoder.CloudStackExecConfig{
	Profiles: make([]decoder.CloudStackProfileConfig, 0),
})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cmk).NotTo(BeNil())
aws := b.BuildAwsCli()
g.Expect(aws).NotTo(BeNil())
flux := b.BuildFluxExecutable()
g.Expect(flux).NotTo(BeNil())
trouble := b.BuildTroubleshootExecutable()
g.Expect(trouble).NotTo(BeNil())
helm := b.BuildHelmExecutable()
g.Expect(helm).NotTo(BeNil())
docker := b.BuildDockerExecutable()
g.Expect(docker).NotTo(BeNil())
ssh := b.BuildSSHExecutable()
g.Expect(ssh).NotTo(BeNil())
g.Expect(closer(ctx)).To(Succeed())
}
func TestExecutablesInDocker(t *testing.T) {
tests := []struct {
name string
envVarValue string
want bool
}{
{
name: "true",
envVarValue: "true",
want: false,
},
{
name: "false",
envVarValue: "false",
want: true,
},
{
name: "not set",
envVarValue: "",
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.envVarValue != "" {
t.Setenv("MR_TOOLS_DISABLE", tt.envVarValue)
}
g := NewWithT(t)
g.Expect(executables.ExecutablesInDocker()).To(Equal(tt.want))
})
}
}
func TestInDockerExecutablesBuilder(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
image := "image"
ctrl := gomock.NewController(t)
c := mocks.NewMockDockerClient(ctrl)
c.EXPECT().PullImage(ctx, image)
c.EXPECT().Execute(ctx, gomock.Any()) // Init container
c.EXPECT().Execute(ctx, gomock.Any()) // Remove container
b, err := executables.NewInDockerExecutablesBuilder(c, image)
g.Expect(err).NotTo(HaveOccurred())
closer, err := b.Init(ctx)
h := b.BuildHelmExecutable()
g.Expect(h).NotTo(BeNil())
g.Expect(err).NotTo(HaveOccurred())
g.Expect(closer(ctx)).To(Succeed())
}
func TestLocalExecutablesBuilder(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := executables.NewLocalExecutablesBuilder()
closer, err := b.Init(ctx)
h := b.BuildHelmExecutable()
g.Expect(h).NotTo(BeNil())
g.Expect(err).NotTo(HaveOccurred())
g.Expect(closer(ctx)).To(Succeed())
}
| 121 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
"strings"
"github.com/aws/eks-anywhere/pkg/logger"
)
const clusterAwsAdminPath = "clusterawsadm"
type Clusterawsadm struct {
Executable
}
func NewClusterawsadm(executable Executable) *Clusterawsadm {
return &Clusterawsadm{Executable: executable}
}
func (c *Clusterawsadm) BootstrapIam(ctx context.Context, envs map[string]string, configFile string) error {
_, err := c.ExecuteWithEnv(ctx, envs, "bootstrap", "iam", "create-cloudformation-stack",
"--config", configFile)
if err != nil {
return fmt.Errorf("executing bootstrap iam: %v", err)
}
return nil
}
func (c *Clusterawsadm) BootstrapCreds(ctx context.Context, envs map[string]string) (string, error) {
stdOut, err := c.ExecuteWithEnv(ctx, envs, "bootstrap", "credentials", "encode-as-profile")
if err != nil {
return "", fmt.Errorf("executing bootstrap credentials: %v", err)
}
return stdOut.String(), nil
}
func (c *Clusterawsadm) ListAccessKeys(ctx context.Context, userName string) (string, error) {
stdOut, err := c.Execute(ctx, "aws", "iam", "list-access-keys", "--user-name", userName)
if err != nil {
return "", fmt.Errorf("listing user keys: %v", err)
}
return stdOut.String(), nil
}
func (c *Clusterawsadm) DeleteCloudformationStack(ctx context.Context, envs map[string]string, fileName string) error {
logger.V(1).Info("Deleting AWS user")
_, err := c.ExecuteWithEnv(ctx, envs, "bootstrap", "iam", "delete-cloudformation-stack", "--config", fileName)
if err != nil {
	if strings.Contains(err.Error(), "status code: 400") {
		return nil
	}
	return fmt.Errorf("failed to delete user: %v", err)
}
return nil
}
| 58 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
)
func TestBootstrapIamSuccess(t *testing.T) {
configFile := "testfile"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, "bootstrap", "iam", "create-cloudformation-stack", "--config", configFile).Return(bytes.Buffer{}, nil)
c := executables.NewClusterawsadm(executable)
err := c.BootstrapIam(ctx, map[string]string{}, configFile)
if err != nil {
t.Fatalf("Clusterawsadm.BootstrapIam() error = %v, want = nil", err)
}
}
func TestBootstrapIamError(t *testing.T) {
configFile := "testfile"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, "bootstrap", "iam", "create-cloudformation-stack", "--config", configFile).Return(bytes.Buffer{}, errors.New("error from execute with env"))
c := executables.NewClusterawsadm(executable)
err := c.BootstrapIam(ctx, map[string]string{}, configFile)
if err == nil {
t.Fatalf("Clusterawsadm.BootstrapIam() error = %v, want = not nil", err)
}
}
func TestBootstrapCredsSuccess(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, "bootstrap", "credentials", "encode-as-profile").Return(bytes.Buffer{}, nil)
c := executables.NewClusterawsadm(executable)
_, err := c.BootstrapCreds(ctx, map[string]string{})
if err != nil {
t.Fatalf("Clusterawsadm.BootstrapCreds() error = %v, want = nil", err)
}
}
func TestBootstrapCredsError(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, "bootstrap", "credentials", "encode-as-profile").Return(bytes.Buffer{}, errors.New("error from execute with env"))
c := executables.NewClusterawsadm(executable)
_, err := c.BootstrapCreds(ctx, map[string]string{})
if err == nil {
t.Fatalf("Clusterawsadm.BootstrapCreds() error = %v, want = not nil", err)
}
}
func TestListAccessKeysSuccess(t *testing.T) {
userName := "user"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "aws", "iam", "list-access-keys", "--user-name", userName).Return(bytes.Buffer{}, nil)
c := executables.NewClusterawsadm(executable)
_, err := c.ListAccessKeys(ctx, userName)
if err != nil {
t.Fatalf("Clusterawsadm.ListAccessKeys() error = %v, want nil", err)
}
}
func TestListAccessKeysError(t *testing.T) {
userName := "user"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "aws", "iam", "list-access-keys", "--user-name", userName).Return(bytes.Buffer{}, errors.New("error from execute"))
c := executables.NewClusterawsadm(executable)
_, err := c.ListAccessKeys(ctx, userName)
if err == nil {
t.Fatalf("Clusterawsadm.ListAccessKeys() error = %v, want not nil", err)
}
}
func TestDeleteCloudformationStackSuccess(t *testing.T) {
fileName := "testfile"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, "bootstrap", "iam", "delete-cloudformation-stack", "--config", fileName).Return(bytes.Buffer{}, nil)
c := executables.NewClusterawsadm(executable)
err := c.DeleteCloudformationStack(ctx, map[string]string{}, fileName)
if err != nil {
t.Fatalf("Clusterawsadm.DeleteCloudformationStack() error = %v, want nil", err)
}
}
func TestDeleteCloudformationStackError(t *testing.T) {
fileName := "testfile"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, "bootstrap", "iam", "delete-cloudformation-stack", "--config", fileName).Return(bytes.Buffer{}, errors.New("error from execute"))
c := executables.NewClusterawsadm(executable)
err := c.DeleteCloudformationStack(ctx, map[string]string{}, fileName)
if err == nil {
t.Fatalf("Clusterawsadm.DeleteCloudformationStack() error = %v, want not nil", err)
}
}
| 126 |
eks-anywhere | aws | Go | package executables
import (
"context"
_ "embed"
"fmt"
"os"
"path"
"path/filepath"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
clusterCtlPath = "clusterctl"
clusterctlConfigFile = "clusterctl_tmp.yaml"
capiPrefix = "/generated/overrides"
etcdadmBootstrapProviderName = "etcdadm-bootstrap"
etcdadmControllerProviderName = "etcdadm-controller"
kubeadmBootstrapProviderName = "kubeadm"
)
//go:embed config/clusterctl.yaml
var clusterctlConfigTemplate string
type Clusterctl struct {
Executable
writer filewriter.FileWriter
reader manifests.FileReader
}
type clusterctlConfiguration struct {
coreVersion string
bootstrapVersion string
controlPlaneVersion string
configFile string
etcdadmBootstrapVersion string
etcdadmControllerVersion string
}
// NewClusterctl builds a new [Clusterctl].
func NewClusterctl(executable Executable, writer filewriter.FileWriter, reader manifests.FileReader) *Clusterctl {
return &Clusterctl{
Executable: executable,
writer: writer,
reader: reader,
}
}
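// imageRepository returns the repository portion of an image URI by dropping
// the final path element (the image name, possibly with a tag). For example,
// "public.ecr.aws/eks-anywhere/cluster-api/capi:v1.0.0" yields
// "public.ecr.aws/eks-anywhere/cluster-api".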
func imageRepository(image v1alpha1.Image) string {
return path.Dir(image.Image())
}
// buildOverridesLayer writes the configuration files
// used by cluster api to install components.
// See: https://cluster-api.sigs.k8s.io/clusterctl/configuration.html
func (c *Clusterctl) buildOverridesLayer(clusterSpec *cluster.Spec, clusterName string, provider providers.Provider) error {
bundle := clusterSpec.VersionsBundle
// Adding the cluster name to the path temporarily, following a suggestion.
//
// This adds an implicit dependency between this method
// and the writer passed to NewClusterctl.
// Ideally the writer implementation should be modified to
// accept a path and file name, and it should create the path in case it
// does not exist.
prefix := filepath.Join(clusterName, generatedDir, overridesDir)
infraBundles := []types.InfrastructureBundle{
{
FolderName: filepath.Join("cert-manager", bundle.CertManager.Version),
Manifests: []v1alpha1.Manifest{
bundle.CertManager.Manifest,
},
},
{
FolderName: filepath.Join("bootstrap-kubeadm", bundle.Bootstrap.Version),
Manifests: []v1alpha1.Manifest{
bundle.Bootstrap.Components,
bundle.Bootstrap.Metadata,
},
},
{
FolderName: filepath.Join("cluster-api", bundle.ClusterAPI.Version),
Manifests: []v1alpha1.Manifest{
bundle.ClusterAPI.Components,
bundle.ClusterAPI.Metadata,
},
},
{
FolderName: filepath.Join("control-plane-kubeadm", bundle.ControlPlane.Version),
Manifests: []v1alpha1.Manifest{
bundle.ControlPlane.Components,
bundle.ControlPlane.Metadata,
},
},
{
FolderName: filepath.Join("bootstrap-etcdadm-bootstrap", bundle.ExternalEtcdBootstrap.Version),
Manifests: []v1alpha1.Manifest{
bundle.ExternalEtcdBootstrap.Components,
bundle.ExternalEtcdBootstrap.Metadata,
},
},
{
FolderName: filepath.Join("bootstrap-etcdadm-controller", bundle.ExternalEtcdController.Version),
Manifests: []v1alpha1.Manifest{
bundle.ExternalEtcdController.Components,
bundle.ExternalEtcdController.Metadata,
},
},
}
infraBundles = append(infraBundles, *provider.GetInfrastructureBundle(clusterSpec))
for _, infraBundle := range infraBundles {
if err := c.writeInfrastructureBundle(clusterSpec, prefix, &infraBundle); err != nil {
return err
}
}
return nil
}
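// For reference, the overrides layer produced above has an on-disk layout
// like the following (illustrative; folder names come from the infraBundles
// slice and the provider bundle):
//
//	<cluster-name>/generated/overrides/
//		cert-manager/<version>/
//		bootstrap-kubeadm/<version>/
//		cluster-api/<version>/
//		control-plane-kubeadm/<version>/
//		bootstrap-etcdadm-bootstrap/<version>/
//		bootstrap-etcdadm-controller/<version>/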
func (c *Clusterctl) writeInfrastructureBundle(clusterSpec *cluster.Spec, rootFolder string, bundle *types.InfrastructureBundle) error {
if bundle == nil {
return nil
}
infraFolder := filepath.Join(rootFolder, bundle.FolderName)
if err := os.MkdirAll(infraFolder, os.ModePerm); err != nil {
return err
}
for _, manifest := range bundle.Manifests {
m, err := bundles.ReadManifest(c.reader, manifest)
if err != nil {
return fmt.Errorf("can't load infrastructure bundle for manifest %s: %v", manifest.URI, err)
}
if err := os.WriteFile(filepath.Join(infraFolder, m.Filename), m.Content, 0o644); err != nil {
return fmt.Errorf("generating file for infrastructure bundle %s: %v", m.Filename, err)
}
}
return nil
}
// BackupManagement saves the CAPI resources of a workload cluster before moving it to the bootstrap cluster during an upgrade.
func (c *Clusterctl) BackupManagement(ctx context.Context, cluster *types.Cluster, managementStatePath string) error {
filePath := filepath.Join(".", cluster.Name, managementStatePath)
err := os.MkdirAll(filePath, os.ModePerm)
if err != nil {
return fmt.Errorf("could not create backup file for CAPI objects: %v", err)
}
_, err = c.Execute(
ctx, "move",
"--to-directory", filePath,
"--kubeconfig", cluster.KubeconfigFile,
"--namespace", constants.EksaSystemNamespace,
)
if err != nil {
return fmt.Errorf("failed taking backup of CAPI objects: %v", err)
}
return nil
}
// MoveManagement moves the management components from the `from` cluster to the `to` cluster.
// If `clusterName` is provided, it filters and moves only the provided cluster.
func (c *Clusterctl) MoveManagement(ctx context.Context, from, to *types.Cluster, clusterName string) error {
params := []string{
"move", "--to-kubeconfig", to.KubeconfigFile, "--namespace", constants.EksaSystemNamespace,
"--filter-cluster", clusterName,
}
if from.KubeconfigFile != "" {
params = append(params, "--kubeconfig", from.KubeconfigFile)
}
_, err := c.Execute(
ctx, params...,
)
if err != nil {
return fmt.Errorf("failed moving management cluster: %v", err)
}
return nil
}
func (c *Clusterctl) GetWorkloadKubeconfig(ctx context.Context, clusterName string, cluster *types.Cluster) ([]byte, error) {
stdOut, err := c.Execute(
ctx, "get", "kubeconfig", clusterName,
"--kubeconfig", cluster.KubeconfigFile,
"--namespace", constants.EksaSystemNamespace,
)
if err != nil {
return nil, fmt.Errorf("executing get kubeconfig: %v", err)
}
return stdOut.Bytes(), nil
}
func (c *Clusterctl) InitInfrastructure(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error {
if cluster == nil {
return fmt.Errorf("invalid cluster (nil)")
}
if cluster.Name == "" {
return fmt.Errorf("invalid cluster name '%s'", cluster.Name)
}
clusterctlConfig, err := c.buildConfig(clusterSpec, cluster.Name, provider)
if err != nil {
return err
}
params := []string{
"init",
"--core", clusterctlConfig.coreVersion,
"--bootstrap", clusterctlConfig.bootstrapVersion,
"--control-plane", clusterctlConfig.controlPlaneVersion,
"--infrastructure", fmt.Sprintf("%s:%s", provider.Name(), provider.Version(clusterSpec)),
"--config", clusterctlConfig.configFile,
"--bootstrap", clusterctlConfig.etcdadmBootstrapVersion,
"--bootstrap", clusterctlConfig.etcdadmControllerVersion,
}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
envMap, err := provider.EnvMap(clusterSpec)
if err != nil {
return err
}
_, err = c.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
return fmt.Errorf("executing init: %v", err)
}
return nil
}
func (c *Clusterctl) buildConfig(clusterSpec *cluster.Spec, clusterName string, provider providers.Provider) (*clusterctlConfiguration, error) {
t := templater.New(c.writer)
bundle := clusterSpec.VersionsBundle
dir, err := os.Getwd()
if err != nil {
return nil, err
}
data := map[string]string{
"CertManagerInjectorRepository": imageRepository(bundle.CertManager.Cainjector),
"CertManagerInjectorTag": bundle.CertManager.Cainjector.Tag(),
"CertManagerControllerRepository": imageRepository(bundle.CertManager.Controller),
"CertManagerControllerTag": bundle.CertManager.Controller.Tag(),
"CertManagerWebhookRepository": imageRepository(bundle.CertManager.Webhook),
"CertManagerWebhookTag": bundle.CertManager.Webhook.Tag(),
"CertManagerVersion": bundle.CertManager.Version,
"ClusterApiControllerRepository": imageRepository(bundle.ClusterAPI.Controller),
"ClusterApiControllerTag": bundle.ClusterAPI.Controller.Tag(),
"ClusterApiKubeRbacProxyRepository": imageRepository(bundle.ClusterAPI.KubeProxy),
"ClusterApiKubeRbacProxyTag": bundle.ClusterAPI.KubeProxy.Tag(),
"KubeadmBootstrapControllerRepository": imageRepository(bundle.Bootstrap.Controller),
"KubeadmBootstrapControllerTag": bundle.Bootstrap.Controller.Tag(),
"KubeadmBootstrapKubeRbacProxyRepository": imageRepository(bundle.Bootstrap.KubeProxy),
"KubeadmBootstrapKubeRbacProxyTag": bundle.Bootstrap.KubeProxy.Tag(),
"KubeadmControlPlaneControllerRepository": imageRepository(bundle.ControlPlane.Controller),
"KubeadmControlPlaneControllerTag": bundle.ControlPlane.Controller.Tag(),
"KubeadmControlPlaneKubeRbacProxyRepository": imageRepository(bundle.ControlPlane.KubeProxy),
"KubeadmControlPlaneKubeRbacProxyTag": bundle.ControlPlane.KubeProxy.Tag(),
"ClusterApiVSphereControllerRepository": imageRepository(bundle.VSphere.ClusterAPIController),
"ClusterApiVSphereControllerTag": bundle.VSphere.ClusterAPIController.Tag(),
"ClusterApiNutanixControllerRepository": imageRepository(bundle.Nutanix.ClusterAPIController),
"ClusterApiNutanixControllerTag": bundle.Nutanix.ClusterAPIController.Tag(),
"ClusterApiCloudStackManagerRepository": imageRepository(bundle.CloudStack.ClusterAPIController),
"ClusterApiCloudStackManagerTag": bundle.CloudStack.ClusterAPIController.Tag(),
"ClusterApiCloudStackKubeRbacProxyRepository": imageRepository(bundle.CloudStack.KubeRbacProxy),
"ClusterApiCloudStackKubeRbacProxyTag": bundle.CloudStack.KubeRbacProxy.Tag(),
"ClusterApiVSphereKubeRbacProxyRepository": imageRepository(bundle.VSphere.KubeProxy),
"ClusterApiVSphereKubeRbacProxyTag": bundle.VSphere.KubeProxy.Tag(),
"DockerKubeRbacProxyRepository": imageRepository(bundle.Docker.KubeProxy),
"DockerKubeRbacProxyTag": bundle.Docker.KubeProxy.Tag(),
"DockerManagerRepository": imageRepository(bundle.Docker.Manager),
"DockerManagerTag": bundle.Docker.Manager.Tag(),
"EtcdadmBootstrapProviderRepository": imageRepository(bundle.ExternalEtcdBootstrap.Controller),
"EtcdadmBootstrapProviderTag": bundle.ExternalEtcdBootstrap.Controller.Tag(),
"EtcdadmBootstrapProviderKubeRbacProxyRepository": imageRepository(bundle.ExternalEtcdBootstrap.KubeProxy),
"EtcdadmBootstrapProviderKubeRbacProxyTag": bundle.ExternalEtcdBootstrap.KubeProxy.Tag(),
"EtcdadmControllerRepository": imageRepository(bundle.ExternalEtcdController.Controller),
"EtcdadmControllerTag": bundle.ExternalEtcdController.Controller.Tag(),
"EtcdadmControllerKubeRbacProxyRepository": imageRepository(bundle.ExternalEtcdController.KubeProxy),
"EtcdadmControllerKubeRbacProxyTag": bundle.ExternalEtcdController.KubeProxy.Tag(),
"DockerProviderVersion": bundle.Docker.Version,
"VSphereProviderVersion": bundle.VSphere.Version,
"CloudStackProviderVersion": bundle.CloudStack.Version,
"SnowProviderVersion": bundle.Snow.Version,
"TinkerbellProviderVersion": bundle.Tinkerbell.Version,
"NutanixProviderVersion": bundle.Nutanix.Version,
"ClusterApiProviderVersion": bundle.ClusterAPI.Version,
"KubeadmControlPlaneProviderVersion": bundle.ControlPlane.Version,
"KubeadmBootstrapProviderVersion": bundle.Bootstrap.Version,
"EtcdadmBootstrapProviderVersion": bundle.ExternalEtcdBootstrap.Version,
"EtcdadmControllerProviderVersion": bundle.ExternalEtcdController.Version,
"dir": path + "/" + clusterName + capiPrefix,
}
filePath, err := t.WriteToFile(clusterctlConfigTemplate, data, clusterctlConfigFile)
if err != nil {
return nil, fmt.Errorf("generating configuration file for clusterctl: %v", err)
}
if err := c.buildOverridesLayer(clusterSpec, clusterName, provider); err != nil {
return nil, err
}
return &clusterctlConfiguration{
configFile: filePath,
bootstrapVersion: fmt.Sprintf("%s:%s", kubeadmBootstrapProviderName, bundle.Bootstrap.Version),
controlPlaneVersion: fmt.Sprintf("kubeadm:%s", bundle.ControlPlane.Version),
coreVersion: fmt.Sprintf("cluster-api:%s", bundle.ClusterAPI.Version),
etcdadmBootstrapVersion: fmt.Sprintf("%s:%s", etcdadmBootstrapProviderName, bundle.ExternalEtcdBootstrap.Version),
etcdadmControllerVersion: fmt.Sprintf("%s:%s", etcdadmControllerProviderName, bundle.ExternalEtcdController.Version),
}, nil
}
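// providerNamespaces maps CAPI provider names to the namespaces their
// controllers run in; Upgrade below addresses providers as
// namespace/provider:version.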
var providerNamespaces = map[string]string{
constants.VSphereProviderName: constants.CapvSystemNamespace,
constants.DockerProviderName: constants.CapdSystemNamespace,
constants.CloudStackProviderName: constants.CapcSystemNamespace,
constants.AWSProviderName: constants.CapaSystemNamespace,
constants.SnowProviderName: constants.CapasSystemNamespace,
constants.NutanixProviderName: constants.CapxSystemNamespace,
constants.TinkerbellProviderName: constants.CaptSystemNamespace,
etcdadmBootstrapProviderName: constants.EtcdAdmBootstrapProviderSystemNamespace,
etcdadmControllerProviderName: constants.EtcdAdmControllerSystemNamespace,
kubeadmBootstrapProviderName: constants.CapiKubeadmBootstrapSystemNamespace,
}
func (c *Clusterctl) Upgrade(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, newSpec *cluster.Spec, changeDiff *clusterapi.CAPIChangeDiff) error {
clusterctlConfig, err := c.buildConfig(newSpec, managementCluster.Name, provider)
if err != nil {
return err
}
upgradeCommand := []string{
"upgrade", "apply",
"--config", clusterctlConfig.configFile,
"--kubeconfig", managementCluster.KubeconfigFile,
}
if changeDiff.ControlPlane != nil {
upgradeCommand = append(upgradeCommand, "--control-plane", fmt.Sprintf("%s/kubeadm:%s", constants.CapiKubeadmControlPlaneSystemNamespace, changeDiff.ControlPlane.NewVersion))
}
if changeDiff.Core != nil {
upgradeCommand = append(upgradeCommand, "--core", fmt.Sprintf("%s/cluster-api:%s", constants.CapiSystemNamespace, changeDiff.Core.NewVersion))
}
if changeDiff.InfrastructureProvider != nil {
newInfraProvider := fmt.Sprintf("%s/%s:%s", providerNamespaces[changeDiff.InfrastructureProvider.ComponentName], changeDiff.InfrastructureProvider.ComponentName, changeDiff.InfrastructureProvider.NewVersion)
upgradeCommand = append(upgradeCommand, "--infrastructure", newInfraProvider)
}
for _, bootstrapProvider := range changeDiff.BootstrapProviders {
newBootstrapProvider := fmt.Sprintf("%s/%s:%s", providerNamespaces[bootstrapProvider.ComponentName], bootstrapProvider.ComponentName, bootstrapProvider.NewVersion)
upgradeCommand = append(upgradeCommand, "--bootstrap", newBootstrapProvider)
}
providerEnvMap, err := provider.EnvMap(newSpec)
if err != nil {
return fmt.Errorf("failed generating provider env map for clusterctl upgrade: %v", err)
}
if _, err = c.ExecuteWithEnv(ctx, providerEnvMap, upgradeCommand...); err != nil {
return fmt.Errorf("failed running upgrade apply with clusterctl: %v", err)
}
return nil
}
func (c *Clusterctl) InstallEtcdadmProviders(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, infraProvider providers.Provider, installProviders []string) error {
if cluster == nil {
return fmt.Errorf("invalid cluster (nil)")
}
if cluster.Name == "" {
return fmt.Errorf("invalid cluster name '%s'", cluster.Name)
}
clusterctlConfig, err := c.buildConfig(clusterSpec, cluster.Name, infraProvider)
if err != nil {
return err
}
params := []string{
"init",
"--config", clusterctlConfig.configFile,
}
for _, provider := range installProviders {
switch provider {
case constants.EtcdAdmBootstrapProviderName:
params = append(params, "--bootstrap", clusterctlConfig.etcdadmBootstrapVersion)
case constants.EtcdadmControllerProviderName:
params = append(params, "--bootstrap", clusterctlConfig.etcdadmControllerVersion)
default:
return fmt.Errorf("unrecognized capi provider %s", provider)
}
}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
envMap, err := infraProvider.EnvMap(clusterSpec)
if err != nil {
return err
}
_, err = c.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
return fmt.Errorf("executing init: %v", err)
}
return nil
}
| 429 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
_ "embed"
"errors"
"fmt"
"os"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/filewriter"
mockproviders "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type clusterctlTest struct {
*WithT
ctx context.Context
cluster *types.Cluster
clusterctl *executables.Clusterctl
e *mockexecutables.MockExecutable
provider *mockproviders.MockProvider
writer filewriter.FileWriter
providerEnvMap map[string]string
}
func newClusterctlTest(t *testing.T) *clusterctlTest {
ctrl := gomock.NewController(t)
_, writer := test.NewWriter(t)
reader := files.NewReader()
e := mockexecutables.NewMockExecutable(ctrl)
return &clusterctlTest{
WithT: NewWithT(t),
ctx: context.Background(),
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "config/c.kubeconfig",
},
e: e,
provider: mockproviders.NewMockProvider(ctrl),
clusterctl: executables.NewClusterctl(e, writer, reader),
writer: writer,
providerEnvMap: map[string]string{"var": "value"},
}
}
func (ct *clusterctlTest) expectBuildOverrideLayer() {
ct.provider.EXPECT().GetInfrastructureBundle(clusterSpec).Return(&types.InfrastructureBundle{})
}
func (ct *clusterctlTest) expectGetProviderEnvMap() {
ct.provider.EXPECT().EnvMap(clusterSpec).Return(ct.providerEnvMap, nil)
}
func TestClusterctlInitInfrastructure(t *testing.T) {
_, writer := test.NewWriter(t)
core := "cluster-api:v0.3.19"
bootstrap := "kubeadm:v0.3.19"
controlPlane := "kubeadm:v0.3.19"
etcdadmBootstrap := "etcdadm-bootstrap:v0.1.0"
etcdadmController := "etcdadm-controller:v0.1.0"
tests := []struct {
cluster *types.Cluster
env map[string]string
testName string
providerName string
providerVersion string
infrastructure string
wantConfig string
wantExecArgs []interface{}
}{
{
testName: "without kubconfig",
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "",
},
providerName: "vsphere",
providerVersion: versionBundle.VSphere.Version,
env: map[string]string{"ENV_VAR1": "VALUE1", "ENV_VAR2": "VALUE2"},
wantExecArgs: []interface{}{
"init", "--core", core, "--bootstrap", bootstrap, "--control-plane", controlPlane, "--infrastructure", "vsphere:v0.7.8", "--config", test.OfType("string"),
"--bootstrap", etcdadmBootstrap, "--bootstrap", etcdadmController,
},
wantConfig: "testdata/clusterctl_expected.yaml",
},
{
testName: "with kubconfig",
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "tmp/k.kubeconfig",
},
providerName: "vsphere",
providerVersion: versionBundle.VSphere.Version,
env: map[string]string{"ENV_VAR1": "VALUE1", "ENV_VAR2": "VALUE2"},
wantExecArgs: []interface{}{
"init", "--core", core, "--bootstrap", bootstrap, "--control-plane", controlPlane, "--infrastructure", "vsphere:v0.7.8", "--config", test.OfType("string"),
"--bootstrap", etcdadmBootstrap, "--bootstrap", etcdadmController,
"--kubeconfig", "tmp/k.kubeconfig",
},
wantConfig: "testdata/clusterctl_expected.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
defer func() {
if !t.Failed() {
os.RemoveAll(tt.cluster.Name)
}
}()
tc := newClusterctlTest(t)
gotConfig := ""
tc.provider.EXPECT().Name().Return(tt.providerName)
tc.provider.EXPECT().Version(clusterSpec).Return(tt.providerVersion)
tc.provider.EXPECT().EnvMap(clusterSpec).Return(tt.env, nil)
tc.provider.EXPECT().GetInfrastructureBundle(clusterSpec).Return(&types.InfrastructureBundle{})
tc.e.EXPECT().ExecuteWithEnv(tc.ctx, tt.env, tt.wantExecArgs...).Return(bytes.Buffer{}, nil).Times(1).Do(
func(ctx context.Context, envs map[string]string, args ...string) (stdout bytes.Buffer, err error) {
gotConfig = args[10]
tw := templater.New(writer)
path, err := os.Getwd()
if err != nil {
t.Fatalf("Error getting local folder: %v", err)
}
data := map[string]string{
"dir": path,
}
template, err := os.ReadFile(tt.wantConfig)
if err != nil {
t.Fatalf("Error reading local file %s: %v", tt.wantConfig, err)
}
filePath, err := tw.WriteToFile(string(template), data, "file.tmp")
if err != nil {
t.Fatalf("Error writing local file %s: %v", "file.tmp", err)
}
test.AssertFilesEquals(t, gotConfig, filePath)
return bytes.Buffer{}, nil
},
)
if err := tc.clusterctl.InitInfrastructure(tc.ctx, clusterSpec, tt.cluster, tc.provider); err != nil {
t.Fatalf("Clusterctl.InitInfrastructure() error = %v, want nil", err)
}
})
}
}
func TestClusterctlInitInfrastructureEnvMapError(t *testing.T) {
cluster := &types.Cluster{Name: "cluster-name"}
defer func() {
if !t.Failed() {
os.RemoveAll(cluster.Name)
}
}()
tt := newClusterctlTest(t)
tt.provider.EXPECT().Name()
tt.provider.EXPECT().Version(clusterSpec)
tt.provider.EXPECT().EnvMap(clusterSpec).Return(nil, errors.New("error with env map"))
tt.provider.EXPECT().GetInfrastructureBundle(clusterSpec).Return(&types.InfrastructureBundle{})
if err := tt.clusterctl.InitInfrastructure(tt.ctx, clusterSpec, cluster, tt.provider); err == nil {
t.Fatal("Clusterctl.InitInfrastructure() error = nil")
}
}
func TestClusterctlInitInfrastructureExecutableError(t *testing.T) {
cluster := &types.Cluster{Name: "cluster-name"}
defer func() {
if !t.Failed() {
os.RemoveAll(cluster.Name)
}
}()
tt := newClusterctlTest(t)
tt.provider.EXPECT().Name()
tt.provider.EXPECT().Version(clusterSpec)
tt.provider.EXPECT().EnvMap(clusterSpec)
tt.provider.EXPECT().GetInfrastructureBundle(clusterSpec).Return(&types.InfrastructureBundle{})
tt.e.EXPECT().ExecuteWithEnv(tt.ctx, nil, gomock.Any()).Return(bytes.Buffer{}, errors.New("error from execute with env"))
if err := tt.clusterctl.InitInfrastructure(tt.ctx, clusterSpec, cluster, tt.provider); err == nil {
t.Fatal("Clusterctl.InitInfrastructure() error = nil")
}
}
func TestClusterctlInitInfrastructureInvalidClusterNameError(t *testing.T) {
tt := newClusterctlTest(t)
if err := tt.clusterctl.InitInfrastructure(tt.ctx, clusterSpec, &types.Cluster{Name: ""}, tt.provider); err == nil {
t.Fatal("Clusterctl.InitInfrastructure() error != nil")
}
}
func TestClusterctlBackupManagement(t *testing.T) {
managementClusterState := fmt.Sprintf("cluster-state-backup-%s", time.Now().Format("2006-01-02T15_04_05"))
clusterName := "cluster"
tests := []struct {
testName string
cluster *types.Cluster
wantMoveArgs []interface{}
}{
{
testName: "backup success",
cluster: &types.Cluster{
Name: clusterName,
KubeconfigFile: "cluster.kubeconfig",
},
wantMoveArgs: []interface{}{"move", "--to-directory", fmt.Sprintf("%s/%s", clusterName, managementClusterState), "--kubeconfig", "cluster.kubeconfig", "--namespace", constants.EksaSystemNamespace},
},
{
testName: "no kubeconfig file",
cluster: &types.Cluster{
Name: clusterName,
},
wantMoveArgs: []interface{}{"move", "--to-directory", fmt.Sprintf("%s/%s", clusterName, managementClusterState), "--kubeconfig", "", "--namespace", constants.EksaSystemNamespace},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tc := newClusterctlTest(t)
tc.e.EXPECT().Execute(tc.ctx, tt.wantMoveArgs...)
if err := tc.clusterctl.BackupManagement(tc.ctx, tt.cluster, managementClusterState); err != nil {
t.Fatalf("Clusterctl.BackupManagement() error = %v, want nil", err)
}
})
}
}
func TestClusterctlBackupManagementFailed(t *testing.T) {
managementClusterState := fmt.Sprintf("cluster-state-backup-%s", time.Now().Format("2006-01-02T15_04_05"))
tt := newClusterctlTest(t)
cluster := &types.Cluster{
Name: "cluster",
KubeconfigFile: "cluster.kubeconfig",
}
wantMoveArgs := []interface{}{"move", "--to-directory", fmt.Sprintf("%s/%s", cluster.Name, managementClusterState), "--kubeconfig", "cluster.kubeconfig", "--namespace", constants.EksaSystemNamespace}
tt.e.EXPECT().Execute(tt.ctx, wantMoveArgs...).Return(bytes.Buffer{}, fmt.Errorf("error backing up management cluster resources"))
if err := tt.clusterctl.BackupManagement(tt.ctx, cluster, managementClusterState); err == nil {
t.Fatalf("Clusterctl.BackupManagement() error = %v, want nil", err)
}
}
func TestClusterctlMoveManagement(t *testing.T) {
tests := []struct {
testName string
from *types.Cluster
to *types.Cluster
clusterName string
wantMoveArgs []interface{}
}{
{
testName: "no kubeconfig",
from: &types.Cluster{},
to: &types.Cluster{},
clusterName: "",
wantMoveArgs: []interface{}{"move", "--to-kubeconfig", "", "--namespace", constants.EksaSystemNamespace, "--filter-cluster", ""},
},
{
testName: "no kubeconfig in 'from' cluster",
from: &types.Cluster{},
to: &types.Cluster{
KubeconfigFile: "to.kubeconfig",
},
clusterName: "",
wantMoveArgs: []interface{}{"move", "--to-kubeconfig", "to.kubeconfig", "--namespace", constants.EksaSystemNamespace, "--filter-cluster", ""},
},
{
testName: "with both kubeconfigs",
from: &types.Cluster{
KubeconfigFile: "from.kubeconfig",
},
to: &types.Cluster{
KubeconfigFile: "to.kubeconfig",
},
clusterName: "",
wantMoveArgs: []interface{}{"move", "--to-kubeconfig", "to.kubeconfig", "--namespace", constants.EksaSystemNamespace, "--filter-cluster", "", "--kubeconfig", "from.kubeconfig"},
},
{
testName: "with filter cluster",
from: &types.Cluster{
KubeconfigFile: "from.kubeconfig",
},
to: &types.Cluster{
KubeconfigFile: "to.kubeconfig",
},
clusterName: "test-cluster",
wantMoveArgs: []interface{}{"move", "--to-kubeconfig", "to.kubeconfig", "--namespace", constants.EksaSystemNamespace, "--filter-cluster", "test-cluster", "--kubeconfig", "from.kubeconfig"},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tc := newClusterctlTest(t)
tc.e.EXPECT().Execute(tc.ctx, tt.wantMoveArgs...)
if err := tc.clusterctl.MoveManagement(tc.ctx, tt.from, tt.to, tt.clusterName); err != nil {
t.Fatalf("Clusterctl.MoveManagement() error = %v, want nil", err)
}
})
}
}
func TestClusterctlUpgradeAllProvidersSuccess(t *testing.T) {
tt := newClusterctlTest(t)
changeDiff := &clusterapi.CAPIChangeDiff{
Core: &types.ComponentChangeDiff{
ComponentName: "cluster-api",
NewVersion: "v0.3.19",
},
ControlPlane: &types.ComponentChangeDiff{
ComponentName: "kubeadm",
NewVersion: "v0.3.19",
},
InfrastructureProvider: &types.ComponentChangeDiff{
ComponentName: "vsphere",
NewVersion: "v0.4.1",
},
BootstrapProviders: []types.ComponentChangeDiff{
{
ComponentName: "kubeadm",
NewVersion: "v0.3.19",
},
{
ComponentName: "etcdadm-bootstrap",
NewVersion: "v0.1.0",
},
{
ComponentName: "etcdadm-controller",
NewVersion: "v0.1.0",
},
},
}
tt.expectBuildOverrideLayer()
tt.expectGetProviderEnvMap()
tt.e.EXPECT().ExecuteWithEnv(tt.ctx, tt.providerEnvMap,
"upgrade", "apply",
"--config", test.OfType("string"),
"--kubeconfig", tt.cluster.KubeconfigFile,
"--control-plane", "capi-kubeadm-control-plane-system/kubeadm:v0.3.19",
"--core", "capi-system/cluster-api:v0.3.19",
"--infrastructure", "capv-system/vsphere:v0.4.1",
"--bootstrap", "capi-kubeadm-bootstrap-system/kubeadm:v0.3.19",
"--bootstrap", "etcdadm-bootstrap-provider-system/etcdadm-bootstrap:v0.1.0",
"--bootstrap", "etcdadm-controller-system/etcdadm-controller:v0.1.0",
)
tt.Expect(tt.clusterctl.Upgrade(tt.ctx, tt.cluster, tt.provider, clusterSpec, changeDiff)).To(Succeed())
}
func TestClusterctlUpgradeInfrastructureProvidersSuccess(t *testing.T) {
tt := newClusterctlTest(t)
changeDiff := &clusterapi.CAPIChangeDiff{
InfrastructureProvider: &types.ComponentChangeDiff{
ComponentName: "vsphere",
NewVersion: "v0.4.1",
},
}
tt.expectBuildOverrideLayer()
tt.expectGetProviderEnvMap()
tt.e.EXPECT().ExecuteWithEnv(tt.ctx, tt.providerEnvMap,
"upgrade", "apply",
"--config", test.OfType("string"),
"--kubeconfig", tt.cluster.KubeconfigFile,
"--infrastructure", "capv-system/vsphere:v0.4.1",
)
tt.Expect(tt.clusterctl.Upgrade(tt.ctx, tt.cluster, tt.provider, clusterSpec, changeDiff)).To(Succeed())
}
func TestClusterctlUpgradeInfrastructureProvidersError(t *testing.T) {
tt := newClusterctlTest(t)
changeDiff := &clusterapi.CAPIChangeDiff{
InfrastructureProvider: &types.ComponentChangeDiff{
ComponentName: "vsphere",
NewVersion: "v0.4.1",
},
}
tt.expectBuildOverrideLayer()
tt.expectGetProviderEnvMap()
tt.e.EXPECT().ExecuteWithEnv(tt.ctx, tt.providerEnvMap,
"upgrade", "apply",
"--config", test.OfType("string"),
"--kubeconfig", tt.cluster.KubeconfigFile,
"--infrastructure", "capv-system/vsphere:v0.4.1",
).Return(bytes.Buffer{}, errors.New("error in exec"))
tt.Expect(tt.clusterctl.Upgrade(tt.ctx, tt.cluster, tt.provider, clusterSpec, changeDiff)).NotTo(Succeed())
}
var clusterSpec = test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle = versionBundle
})
var versionBundle = &cluster.VersionsBundle{
KubeDistro: &cluster.KubeDistro{
Kubernetes: cluster.VersionedRepository{
Repository: "public.ecr.aws/eks-distro/kubernetes",
Tag: "v1.19.6-eks-1-19-2",
},
CoreDNS: cluster.VersionedRepository{
Repository: "public.ecr.aws/eks-distro/coredns",
Tag: "v1.8.0-eks-1-19-2",
},
Etcd: cluster.VersionedRepository{
Repository: "public.ecr.aws/eks-distro/etcd-io",
Tag: "v3.4.14-eks-1-19-2",
},
},
VersionsBundle: &v1alpha1.VersionsBundle{
KubeVersion: "1.19",
EksD: v1alpha1.EksDRelease{
KindNode: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.2",
},
},
CertManager: v1alpha1.CertManagerBundle{
Acmesolver: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/cert-manager/cert-manager-acmesolver:v1.1.0",
},
Cainjector: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/cert-manager/cert-manager-cainjector:v1.1.0",
},
Controller: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/cert-manager/cert-manager-controller:v1.1.0",
},
Webhook: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/cert-manager/cert-manager-webhook:v1.1.0",
},
Manifest: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Version: "v1.5.3",
},
ClusterAPI: v1alpha1.CoreClusterAPI{
Version: "v0.3.19",
Controller: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api/cluster-api-controller:v0.3.19-eks-a-0.0.1.build.38",
},
KubeProxy: kubeProxyVersion08,
Components: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Metadata: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
},
Bootstrap: v1alpha1.KubeadmBootstrapBundle{
Version: "v0.3.19",
Controller: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v0.3.19-eks-a-0.0.1.build.38",
},
KubeProxy: kubeProxyVersion08,
Components: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Metadata: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
},
ControlPlane: v1alpha1.KubeadmControlPlaneBundle{
Version: "v0.3.19",
Controller: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v0.3.19-eks-a-0.0.1.build.38",
},
KubeProxy: kubeProxyVersion08,
Components: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Metadata: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
},
Snow: v1alpha1.SnowBundle{
Version: "v0.0.0",
},
VSphere: v1alpha1.VSphereBundle{
Version: "v0.7.8",
ClusterAPIController: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v0.7.8-eks-a-0.0.1.build.38",
},
KubeProxy: kubeProxyVersion08,
},
Nutanix: v1alpha1.NutanixBundle{
Version: "v1.0.1",
ClusterAPIController: v1alpha1.Image{
URI: "public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix/release/manager:v1.0.1-eks-a-v0.0.0-dev-build.1",
},
},
Tinkerbell: v1alpha1.TinkerbellBundle{
Version: "v0.1.0",
ClusterAPIController: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/tinkerbell/cluster-api-provider-tinkerbell:v0.1.0-eks-a-0.0.1.build.38",
},
},
CloudStack: v1alpha1.CloudStackBundle{
Version: "v0.7.8",
ClusterAPIController: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.7.8-eks-a-0.0.1.build.38",
},
KubeRbacProxy: kubeProxyVersion08,
},
Docker: v1alpha1.DockerBundle{
Version: "v0.3.19",
Manager: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api/capd-manager:v0.3.19-eks-a-0.0.1.build.38",
},
KubeProxy: kubeProxyVersion08,
},
Eksa: v1alpha1.EksaBundle{
CliTools: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/eks-anywhere-cli-tools:v1-19-1-75ac0bf61974d7ea5d83c17a1c629f26c142cca7",
},
},
ExternalEtcdBootstrap: v1alpha1.EtcdadmBootstrapBundle{
Version: "v0.1.0",
Components: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Metadata: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Controller: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/mrajashree/etcdadm-bootstrap-provider:v0.1.0",
},
KubeProxy: kubeProxyVersion08,
},
ExternalEtcdController: v1alpha1.EtcdadmControllerBundle{
Version: "v0.1.0",
Components: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Metadata: v1alpha1.Manifest{
URI: "testdata/fake_manifest.yaml",
},
Controller: v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/mrajashree/etcdadm-controller:v0.1.0",
},
KubeProxy: kubeProxyVersion08,
},
},
}
var kubeProxyVersion08 = v1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/brancz/kube-rbac-proxy:v0.8.0-25df7d96779e2a305a22c6e3f9425c3465a77244",
}
| 584 |
eks-anywhere | aws | Go | package executables
import (
"bytes"
"context"
_ "embed"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/templater"
)
//go:embed config/cmk.ini
var cmkConfigTemplate string
const (
cmkPath = "cmk"
cmkConfigFileNameTemplate = "cmk_%s.ini"
defaultCloudStackPreflightTimeout = "30"
rootDomain = "ROOT"
domainDelimiter = "/"
)
// Cmk wraps the CloudMonkey (cmk) executable CLI to perform operations against a CloudStack endpoint.
type Cmk struct {
writer filewriter.FileWriter
executable Executable
configMap map[string]decoder.CloudStackProfileConfig
}
type listTemplatesResponse struct {
CmkTemplates []cmkTemplate `json:"template"`
}
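// Close is a no-op: Cmk holds no long-lived connections or resources to release.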
func (c *Cmk) Close(ctx context.Context) error {
return nil
}
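// ValidateTemplatePresent checks that exactly one template matching the given
// id or name exists in the zone, optionally scoped by domain and account.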
func (c *Cmk) ValidateTemplatePresent(ctx context.Context, profile string, domainId string, zoneId string, account string, template v1alpha1.CloudStackResourceIdentifier) error {
command := newCmkCommand("list templates")
applyCmkArgs(&command, appendArgs("templatefilter=all"), appendArgs("listall=true"))
if len(template.Id) > 0 {
applyCmkArgs(&command, withCloudStackId(template.Id))
} else {
applyCmkArgs(&command, withCloudStackName(template.Name))
}
applyCmkArgs(&command, withCloudStackZoneId(zoneId))
if len(domainId) > 0 {
applyCmkArgs(&command, withCloudStackDomainId(domainId))
if len(account) > 0 {
applyCmkArgs(&command, withCloudStackAccount(account))
}
}
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("getting templates info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return fmt.Errorf("template %s not found", template)
}
response := listTemplatesResponse{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
templates := response.CmkTemplates
if len(templates) > 1 {
return fmt.Errorf("duplicate templates %s found", template)
} else if len(templates) == 0 {
return fmt.Errorf("template %s not found", template)
}
return nil
}
// SearchTemplate looks for a template by name or by id and returns template name if found.
func (c *Cmk) SearchTemplate(ctx context.Context, profile string, template v1alpha1.CloudStackResourceIdentifier) (string, error) {
command := newCmkCommand("list templates")
applyCmkArgs(&command, appendArgs("templatefilter=all"), appendArgs("listall=true"))
if len(template.Id) > 0 {
applyCmkArgs(&command, withCloudStackId(template.Id))
} else {
applyCmkArgs(&command, withCloudStackName(template.Name))
}
result, err := c.exec(ctx, profile, command...)
if err != nil {
return "", fmt.Errorf("getting templates info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return "", nil
}
response := listTemplatesResponse{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return "", fmt.Errorf("parsing response into json: %v", err)
}
templates := response.CmkTemplates
if len(templates) > 1 {
return "", fmt.Errorf("duplicate templates %s found", template)
} else if len(templates) == 0 {
return "", nil
}
return templates[0].Name, nil
}
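// ValidateServiceOfferingPresent checks that exactly one service offering
// matching the given id or name exists in the zone.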
func (c *Cmk) ValidateServiceOfferingPresent(ctx context.Context, profile string, zoneId string, serviceOffering v1alpha1.CloudStackResourceIdentifier) error {
command := newCmkCommand("list serviceofferings")
if len(serviceOffering.Id) > 0 {
applyCmkArgs(&command, withCloudStackId(serviceOffering.Id))
} else {
applyCmkArgs(&command, withCloudStackName(serviceOffering.Name))
}
applyCmkArgs(&command, withCloudStackZoneId(zoneId))
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("getting service offerings info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return fmt.Errorf("service offering %s not found", serviceOffering)
}
response := struct {
CmkServiceOfferings []cmkServiceOffering `json:"serviceoffering"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
offerings := response.CmkServiceOfferings
if len(offerings) > 1 {
return fmt.Errorf("duplicate service offering %s found", serviceOffering)
} else if len(offerings) == 0 {
return fmt.Errorf("service offering %s not found", serviceOffering)
}
return nil
}
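// ValidateDiskOfferingPresent checks that exactly one disk offering matching
// the given id or name exists in the zone, and that the requested custom size
// is consistent with whether the offering is customized.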
func (c *Cmk) ValidateDiskOfferingPresent(ctx context.Context, profile string, zoneId string, diskOffering v1alpha1.CloudStackResourceDiskOffering) error {
command := newCmkCommand("list diskofferings")
if len(diskOffering.Id) > 0 {
applyCmkArgs(&command, withCloudStackId(diskOffering.Id))
} else {
applyCmkArgs(&command, withCloudStackName(diskOffering.Name))
}
applyCmkArgs(&command, withCloudStackZoneId(zoneId))
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("getting disk offerings info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return fmt.Errorf("disk offering ID/Name %s/%s not found", diskOffering.Id, diskOffering.Name)
}
response := struct {
CmkDiskOfferings []cmkDiskOffering `json:"diskoffering"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
offerings := response.CmkDiskOfferings
if len(offerings) > 1 {
return fmt.Errorf("duplicate disk offering ID/Name %s/%s found", diskOffering.Id, diskOffering.Name)
} else if len(offerings) == 0 {
return fmt.Errorf("disk offering ID/Name %s/%s not found", diskOffering.Id, diskOffering.Name)
}
if offerings[0].Customized && diskOffering.CustomSize <= 0 {
return fmt.Errorf("disk offering size %d <= 0 for customized disk offering", diskOffering.CustomSize)
}
if !offerings[0].Customized && diskOffering.CustomSize > 0 {
return fmt.Errorf("disk offering size %d > 0 for non-customized disk offering", diskOffering.CustomSize)
}
return nil
}
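// ValidateAffinityGroupsPresent checks that each of the given affinity group
// ids resolves to exactly one affinity group, optionally scoped by domain and
// account.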
func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, profile string, domainId string, account string, affinityGroupIds []string) error {
for _, affinityGroupId := range affinityGroupIds {
command := newCmkCommand("list affinitygroups")
applyCmkArgs(&command, withCloudStackId(affinityGroupId))
// account must be specified with a domainId
// domainId can be specified without account
if len(domainId) > 0 {
applyCmkArgs(&command, withCloudStackDomainId(domainId))
if len(account) > 0 {
applyCmkArgs(&command, withCloudStackAccount(account))
}
}
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("getting affinity group info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
			return fmt.Errorf("affinity group %s not found", affinityGroupId)
}
response := struct {
CmkAffinityGroups []cmkAffinityGroup `json:"affinitygroup"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
affinityGroup := response.CmkAffinityGroups
if len(affinityGroup) > 1 {
return fmt.Errorf("duplicate affinity group %s found", affinityGroupId)
} else if len(affinityGroup) == 0 {
return fmt.Errorf("affinity group %s not found", affinityGroupId)
}
}
return nil
}
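// ValidateZoneAndGetId checks that exactly one zone matches the given id or
// name and returns that zone's id.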
func (c *Cmk) ValidateZoneAndGetId(ctx context.Context, profile string, zone v1alpha1.CloudStackZone) (string, error) {
command := newCmkCommand("list zones")
if len(zone.Id) > 0 {
applyCmkArgs(&command, withCloudStackId(zone.Id))
} else {
applyCmkArgs(&command, withCloudStackName(zone.Name))
}
result, err := c.exec(ctx, profile, command...)
if err != nil {
return "", fmt.Errorf("getting zones info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return "", fmt.Errorf("zone %s not found", zone)
}
response := struct {
CmkZones []cmkResourceIdentifier `json:"zone"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return "", fmt.Errorf("parsing response into json: %v", err)
}
cmkZones := response.CmkZones
if len(cmkZones) > 1 {
return "", fmt.Errorf("duplicate zone %s found", zone)
} else if len(cmkZones) == 0 {
return "", fmt.Errorf("zone %s not found", zone)
}
return cmkZones[0].Id, nil
}
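// ValidateDomainAndGetId resolves a domain path (e.g. "foo/domain1") to its id
// by listing domains by name and matching on the full ROOT-prefixed path.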
func (c *Cmk) ValidateDomainAndGetId(ctx context.Context, profile string, domain string) (string, error) {
domainId := ""
command := newCmkCommand("list domains")
// "list domains" API does not support querying by domain path, so here we extract the domain name which is the last part of the input domain
tokens := strings.Split(domain, domainDelimiter)
domainName := tokens[len(tokens)-1]
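	// e.g. for input "foo/domain1" the extracted name is "domain1", and the full path computed below is "ROOT/foo/domain1"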
applyCmkArgs(&command, withCloudStackName(domainName), appendArgs("listall=true"))
result, err := c.exec(ctx, profile, command...)
if err != nil {
return domainId, fmt.Errorf("getting domain info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return domainId, fmt.Errorf("domain %s not found", domain)
}
response := struct {
CmkDomains []cmkDomain `json:"domain"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return domainId, fmt.Errorf("parsing response into json: %v", err)
}
domains := response.CmkDomains
var domainPath string
if domain == rootDomain {
domainPath = rootDomain
} else {
domainPath = strings.Join([]string{rootDomain, domain}, domainDelimiter)
}
for _, d := range domains {
if d.Path == domainPath {
domainId = d.Id
break
}
}
if domainId == "" {
return domainId, fmt.Errorf("domain(s) found for domain name %s, but not found a domain with domain path %s", domain, domainPath)
}
return domainId, nil
}
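// ValidateNetworkPresent checks that exactly one network matching the given
// identifier exists in the zone, optionally scoped by domain and account.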
func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string) error {
command := newCmkCommand("list networks")
	// account must be specified with a domainId
// domainId can be specified without account
if len(domainId) > 0 {
applyCmkArgs(&command, withCloudStackDomainId(domainId))
if len(account) > 0 {
applyCmkArgs(&command, withCloudStackAccount(account))
}
}
applyCmkArgs(&command, withCloudStackZoneId(zoneId))
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("getting network info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return fmt.Errorf("network %s not found in zone %s", network, zoneId)
}
response := struct {
CmkNetworks []cmkResourceIdentifier `json:"network"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
networks := response.CmkNetworks
	// filter by network name client-side -- cmk does not support a name= filter for networks.
	// if both network id and name are provided, this confirms the name matches the network retrieved by id.
	// if only the name is provided, this keeps only the networks with the specified name.
if len(network.Name) > 0 {
networks = []cmkResourceIdentifier{}
for _, net := range response.CmkNetworks {
if net.Name == network.Name {
networks = append(networks, net)
}
}
}
if len(networks) > 1 {
return fmt.Errorf("duplicate network %s found", network)
} else if len(networks) == 0 {
return fmt.Errorf("network %s not found in zoneRef %s", network, zoneId)
}
return nil
}
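// ValidateAccountPresent checks that exactly one account with the given name
// exists under the domain; an empty account name is skipped as valid.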
func (c *Cmk) ValidateAccountPresent(ctx context.Context, profile string, account string, domainId string) error {
// If account is not specified then no need to check its presence
if len(account) == 0 {
return nil
}
command := newCmkCommand("list accounts")
applyCmkArgs(&command, withCloudStackName(account), withCloudStackDomainId(domainId))
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("getting accounts info - %s: %v", result.String(), err)
}
if result.Len() == 0 {
return fmt.Errorf("account %s not found", account)
}
response := struct {
CmkAccounts []cmkAccount `json:"account"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
accounts := response.CmkAccounts
if len(accounts) > 1 {
return fmt.Errorf("duplicate account %s found", account)
} else if len(accounts) == 0 {
return fmt.Errorf("account %s not found", account)
}
return nil
}
// NewCmk initializes CloudMonkey executable to query CloudStack via CLI.
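//
// A minimal construction sketch (the executable and writer values here are
// illustrative placeholders supplied by the caller, not part of this package):
//
//	cfg := &decoder.CloudStackExecConfig{
//		Profiles: []decoder.CloudStackProfileConfig{
//			{Name: "default", ApiKey: "key", SecretKey: "secret", ManagementUrl: "http://host:8080/client/api"},
//		},
//	}
//	cmk, err := NewCmk(executable, writer, cfg)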
func NewCmk(executable Executable, writer filewriter.FileWriter, config *decoder.CloudStackExecConfig) (*Cmk, error) {
if config == nil {
return nil, fmt.Errorf("nil exec config for CloudMonkey, unable to proceed")
}
configMap := make(map[string]decoder.CloudStackProfileConfig, len(config.Profiles))
for _, profile := range config.Profiles {
configMap[profile.Name] = profile
}
return &Cmk{
writer: writer,
executable: executable,
configMap: configMap,
}, nil
}
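// GetManagementApiEndpoint returns the CloudStack management URL configured
// for the given profile.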
func (c *Cmk) GetManagementApiEndpoint(profile string) (string, error) {
config, exist := c.configMap[profile]
if exist {
return config.ManagementUrl, nil
}
return "", fmt.Errorf("profile %s does not exist", profile)
}
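// CleanupVms force-stops and expunges every virtual machine whose name matches
// the cluster name keyword; when dryRun is set it only logs the matches.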
func (c *Cmk) CleanupVms(ctx context.Context, profile string, clusterName string, dryRun bool) error {
command := newCmkCommand("list virtualmachines")
applyCmkArgs(&command, withCloudStackKeyword(clusterName), appendArgs("listall=true"))
result, err := c.exec(ctx, profile, command...)
if err != nil {
return fmt.Errorf("listing virtual machines in cluster %s: %s: %v", clusterName, result.String(), err)
}
if result.Len() == 0 {
logger.Info("virtual machines not found", "cluster", clusterName)
return nil
}
response := struct {
CmkVirtualMachines []cmkResourceIdentifier `json:"virtualmachine"`
}{}
if err = json.Unmarshal(result.Bytes(), &response); err != nil {
return fmt.Errorf("parsing response into json: %v", err)
}
for _, vm := range response.CmkVirtualMachines {
if dryRun {
logger.Info("Found ", "vm_name", vm.Name)
continue
}
stopCommand := newCmkCommand("stop virtualmachine")
applyCmkArgs(&stopCommand, withCloudStackId(vm.Id), appendArgs("forced=true"))
stopResult, err := c.exec(ctx, profile, stopCommand...)
if err != nil {
return fmt.Errorf("stopping virtual machine with name %s and id %s: %s: %v", vm.Name, vm.Id, stopResult.String(), err)
}
destroyCommand := newCmkCommand("destroy virtualmachine")
applyCmkArgs(&destroyCommand, withCloudStackId(vm.Id), appendArgs("expunge=true"))
destroyResult, err := c.exec(ctx, profile, destroyCommand...)
if err != nil {
return fmt.Errorf("destroying virtual machine with name %s and id %s: %s: %v", vm.Name, vm.Id, destroyResult.String(), err)
}
logger.Info("Deleted ", "vm_name", vm.Name, "vm_id", vm.Id)
}
return nil
}
func (c *Cmk) exec(ctx context.Context, profile string, args ...string) (stdout bytes.Buffer, err error) {
configFile, err := c.buildCmkConfigFile(profile)
if err != nil {
return bytes.Buffer{}, fmt.Errorf("failed cmk validations: %v", err)
}
argsWithConfigFile := append([]string{"-c", configFile}, args...)
return c.executable.Execute(ctx, argsWithConfigFile...)
}
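// buildCmkConfigFile renders the embedded cmk.ini template for the given
// profile, honoring the CLOUDSTACK_PREFLIGHT_TIMEOUT override, and returns the
// absolute path of the generated config file.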
func (c *Cmk) buildCmkConfigFile(profile string) (configFile string, err error) {
config, exist := c.configMap[profile]
if !exist {
return "", fmt.Errorf("profile %s does not exist", profile)
}
t := templater.New(c.writer)
config.Timeout = defaultCloudStackPreflightTimeout
if timeout, isSet := os.LookupEnv("CLOUDSTACK_PREFLIGHT_TIMEOUT"); isSet {
if _, err := strconv.ParseUint(timeout, 10, 16); err != nil {
return "", fmt.Errorf("CLOUDSTACK_PREFLIGHT_TIMEOUT must be a number: %v", err)
}
config.Timeout = timeout
}
writtenFileName, err := t.WriteToFile(cmkConfigTemplate, config, fmt.Sprintf(cmkConfigFileNameTemplate, profile))
if err != nil {
return "", fmt.Errorf("creating file for cmk config: %v", err)
}
configFile, err = filepath.Abs(writtenFileName)
if err != nil {
return "", fmt.Errorf("failed to generate absolute filepath for generated config file at %s", writtenFileName)
}
return configFile, nil
}
type cmkTemplate struct {
Id string `json:"id"`
Name string `json:"name"`
Zonename string `json:"zonename"`
}
type cmkServiceOffering struct {
CpuNumber int `json:"cpunumber"`
CpuSpeed int `json:"cpuspeed"`
Memory int `json:"memory"`
Id string `json:"id"`
Name string `json:"name"`
}
type cmkResourceIdentifier struct {
Id string `json:"id"`
Name string `json:"name"`
}
type cmkDiskOffering struct {
Id string `json:"id"`
Name string `json:"name"`
Customized bool `json:"iscustomized"`
}
type cmkAffinityGroup struct {
Type string `json:"type"`
Id string `json:"id"`
Name string `json:"name"`
}
type cmkDomain struct {
Id string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
}
type cmkAccount struct {
RoleType string `json:"roletype"`
Domain string `json:"domain"`
Id string `json:"id"`
Name string `json:"name"`
}
| 522 |
eks-anywhere | aws | Go | package executables
import (
"fmt"
"strings"
)
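// cmkCommandArgs is a functional option that appends CloudStack filter
// arguments, in cmk's key="value" form, to a command slice.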
type cmkCommandArgs func(*[]string)
func newCmkCommand(command string) []string {
return strings.Fields(command)
}
func applyCmkArgs(params *[]string, args ...cmkCommandArgs) {
for _, arg := range args {
arg(params)
}
}
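// For example, a "list zones" lookup filtered by name composes as follows
// (sketch; "zone1" is an illustrative value):
//
//	cmd := newCmkCommand("list zones")
//	applyCmkArgs(&cmd, withCloudStackName("zone1"))
//	// cmd is now []string{"list", "zones", `name="zone1"`}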
func appendArgs(new ...string) cmkCommandArgs {
return func(args *[]string) {
*args = append(*args, new...)
}
}
func withCloudStackDomainId(domainId string) cmkCommandArgs {
return appendArgs(fmt.Sprintf("domainid=\"%s\"", domainId))
}
func withCloudStackAccount(account string) cmkCommandArgs {
return appendArgs(fmt.Sprintf("account=\"%s\"", account))
}
func withCloudStackZoneId(zoneId string) cmkCommandArgs {
return appendArgs(fmt.Sprintf("zoneid=\"%s\"", zoneId))
}
func withCloudStackId(id string) cmkCommandArgs {
return appendArgs(fmt.Sprintf("id=\"%s\"", id))
}
func withCloudStackName(name string) cmkCommandArgs {
return appendArgs(fmt.Sprintf("name=\"%s\"", name))
}
func withCloudStackKeyword(keyword string) cmkCommandArgs {
return appendArgs(fmt.Sprintf("keyword=\"%s\"", keyword))
}
| 49 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
_ "embed"
"errors"
"fmt"
"path/filepath"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
const (
cmkConfigFileName = "cmk_test_name.ini"
accountName = "account1"
rootDomain = "ROOT"
rootDomainID = "5300cdac-74d5-11ec-8696-c81f66d3e965"
domain = "foo/domain1"
domainName = "domain1"
domainID = "7700cdac-74d5-11ec-8696-c81f66d3e965"
domain2 = "foo/bar/domain1"
domain2Name = "domain1"
domain2ID = "8800cdac-74d5-11ec-8696-c81f66d3e965"
zoneID = "4e3b338d-87a6-4189-b931-a1747edeea8f"
)
var execConfig = &decoder.CloudStackExecConfig{
Profiles: []decoder.CloudStackProfileConfig{
{
Name: "test_name",
ApiKey: "test",
SecretKey: "test",
ManagementUrl: "http://1.1.1.1:8080/client/api",
},
},
}
var execConfigWithMultipleProfiles = &decoder.CloudStackExecConfig{
Profiles: []decoder.CloudStackProfileConfig{
execConfig.Profiles[0],
{
Name: "test_name_2",
ApiKey: "test_2",
SecretKey: "test_2",
ManagementUrl: "http://1.1.1.1:8080/client/api_2",
},
},
}
var zones = []v1alpha1.CloudStackZone{
{Name: "TEST_RESOURCE", Network: v1alpha1.CloudStackResourceIdentifier{Name: "TEST_RESOURCE"}},
{Name: "TEST_RESOURCE", Network: v1alpha1.CloudStackResourceIdentifier{Id: "TEST_RESOURCE"}},
{Id: "TEST_RESOURCE", Network: v1alpha1.CloudStackResourceIdentifier{Name: "TEST_RESOURCE"}},
{Id: "TEST_RESOURCE", Network: v1alpha1.CloudStackResourceIdentifier{Id: "TEST_RESOURCE"}},
}
var resourceName = v1alpha1.CloudStackResourceIdentifier{
Name: "TEST_RESOURCE",
}
var resourceID = v1alpha1.CloudStackResourceIdentifier{
Id: "TEST_RESOURCE",
}
var diskOfferingResourceName = v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "TEST_RESOURCE",
},
MountPath: "/TEST_RESOURCE",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
var diskOfferingResourceID = v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "TEST_RESOURCE",
},
MountPath: "/TEST_RESOURCE",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
var diskOfferingCustomSizeInGB = v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Id: "TEST_RESOURCE",
},
CustomSize: 1,
MountPath: "/TEST_RESOURCE",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
func TestCmkCleanupVms(t *testing.T) {
_, writer := test.NewWriter(t)
configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName))
clusterName := "test"
tests := []struct {
testName string
argumentsExecCalls [][]string
jsonResponseFile string
cmkFunc func(cmk executables.Cmk, ctx context.Context) error
cmkResponseError error
wantErr bool
}{
{
testName: "listvirtualmachines json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCalls: [][]string{{
"-c", configFilePath,
"list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true",
}},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false)
},
cmkResponseError: nil,
wantErr: true,
},
{
testName: "dry run succeeds",
jsonResponseFile: "testdata/cmk_list_virtualmachine_singular.json",
argumentsExecCalls: [][]string{
{
"-c", configFilePath,
"list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true",
},
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, true)
},
cmkResponseError: nil,
wantErr: false,
},
{
testName: "listvirtualmachines no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCalls: [][]string{{
"-c", configFilePath,
"list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true",
}},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false)
},
cmkResponseError: nil,
wantErr: false,
},
{
testName: "listaffinitygroups json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCalls: [][]string{{
"-c", configFilePath,
"list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true",
}},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false)
},
cmkResponseError: nil,
wantErr: true,
},
{
testName: "full runthrough succeeds",
jsonResponseFile: "testdata/cmk_list_virtualmachine_singular.json",
argumentsExecCalls: [][]string{
{
"-c", configFilePath,
"list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true",
},
{
"-c", configFilePath, "stop", "virtualmachine", "id=\"30e8b0b1-f286-4372-9f1f-441e199a3f49\"",
"forced=true",
},
{
"-c", configFilePath, "destroy", "virtualmachine", "id=\"30e8b0b1-f286-4372-9f1f-441e199a3f49\"",
"expunge=true",
},
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false)
},
cmkResponseError: nil,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
for _, argsList := range tt.argumentsExecCalls {
executable.EXPECT().Execute(ctx, argsList).
Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError)
}
cmk, _ := executables.NewCmk(executable, writer, execConfig)
err := tt.cmkFunc(*cmk, ctx)
if tt.wantErr && err != nil || !tt.wantErr && err == nil {
return
}
t.Fatalf("Cmk error: %v", err)
})
}
}
func TestNewCmkNilConfig(t *testing.T) {
_, err := executables.NewCmk(nil, nil, nil)
if err == nil {
t.Fatalf("Expected cmk to fail on creation with nil config but instead it succeeded")
}
}
func TestCmkListOperations(t *testing.T) {
_, writer := test.NewWriter(t)
configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName))
tests := []struct {
testName string
argumentsExecCall []string
jsonResponseFile string
cmkFunc func(cmk executables.Cmk, ctx context.Context) error
cmkResponseError error
wantErr bool
wantResultCount int
}{
{
testName: "listdomain success on name root",
jsonResponseFile: "testdata/cmk_list_domain_root.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "domains", fmt.Sprintf("name=\"%s\"", rootDomain), "listall=true",
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
domainID, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, rootDomain)
if domainID != rootDomainID {
t.Fatalf("Expected domain id: %s, actual domain id: %s", rootDomainID, domainID)
}
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 0,
},
{
testName: "listdomain success on name filter",
jsonResponseFile: "testdata/cmk_list_domain_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true",
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
actualDomainID, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain)
if actualDomainID != domainID {
t.Fatalf("Expected domain id: %s, actual domain id: %s", domainID, actualDomainID)
}
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 0,
},
{
testName: "listdomain failure on multiple returns",
jsonResponseFile: "testdata/cmk_list_domain_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true",
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domainName)
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listdomain success on multiple returns",
jsonResponseFile: "testdata/cmk_list_domain_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "domains", fmt.Sprintf("name=\"%s\"", domain2Name), "listall=true",
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
domainID, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain2)
if domainID != domain2ID {
t.Fatalf("Expected domain id: %s, actual domain id: %s", domain2ID, domainID)
}
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 0,
},
{
testName: "listdomains json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true",
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain)
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listdomains no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true",
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain)
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listaccounts success on name filter",
jsonResponseFile: "testdata/cmk_list_account_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "accounts", fmt.Sprintf("name=\"%s\"", accountName), fmt.Sprintf("domainid=\"%s\"", domainID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainID)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 0,
},
{
testName: "listaccounts json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "accounts", fmt.Sprintf("name=\"%s\"", accountName), fmt.Sprintf("domainid=\"%s\"", domainID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listaccounts no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "accounts", fmt.Sprintf("name=\"%s\"", accountName), fmt.Sprintf("domainid=\"%s\"", domainID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listzones success on name filter",
jsonResponseFile: "testdata/cmk_list_zone_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0])
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listzones success on id filter",
jsonResponseFile: "testdata/cmk_list_zone_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("id=\"%s\"", resourceID.Id),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[2])
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listzones failure on multple results",
jsonResponseFile: "testdata/cmk_list_zone_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("id=\"%s\"", resourceID.Id),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[2])
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "listzones failure on none results",
jsonResponseFile: "testdata/cmk_list_zone_none.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("id=\"%s\"", resourceID.Id),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[2])
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "listzones failure on cmk failure",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0])
return err
},
cmkResponseError: errors.New("cmk calling return exception"),
wantErr: true,
wantResultCount: 0,
},
{
testName: "listzones no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0])
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listzones json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0])
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listnetworks success on name filter",
jsonResponseFile: "testdata/cmk_list_network_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "networks", fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainID, zones[2].Network, zones[2].Id, accountName)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listnetworks failure on multiple results",
jsonResponseFile: "testdata/cmk_list_network_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "networks", fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainID, zones[2].Network, zones[2].Id, accountName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "listnetworks failure on none results",
jsonResponseFile: "testdata/cmk_list_network_none.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "networks", fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainID, zones[2].Network, zones[2].Id, accountName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "listnetworks failure on cmk failure",
jsonResponseFile: "testdata/cmk_list_network_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "networks", fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainID, zones[2].Network, zones[2].Id, accountName)
},
cmkResponseError: errors.New("cmk calling return exception"),
wantErr: true,
wantResultCount: 1,
},
{
testName: "listnetworks no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "networks", fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainID, zones[2].Network, zones[2].Id, accountName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listnetworks json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "networks", fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainID, zones[2].Network, zones[2].Id, accountName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listserviceofferings success on name filter",
jsonResponseFile: "testdata/cmk_list_serviceoffering_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "serviceofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, resourceName)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listserviceofferings success on id filter",
jsonResponseFile: "testdata/cmk_list_serviceoffering_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "serviceofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, resourceID)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listserviceofferings no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "serviceofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, resourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listserviceofferings json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "serviceofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, resourceName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listdiskofferings success on name filter",
jsonResponseFile: "testdata/cmk_list_diskoffering_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceName)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listdiskofferings success on id filter",
jsonResponseFile: "testdata/cmk_list_diskoffering_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceID)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listdiskofferings no results",
jsonResponseFile: "testdata/cmk_list_diskoffering_empty.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listdiskofferings no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listdiskofferings multiple results",
jsonResponseFile: "testdata/cmk_list_diskoffering_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 4,
},
{
testName: "listdiskofferings customized results with customSizeInGB > 0",
jsonResponseFile: "testdata/cmk_list_diskoffering_singular_customized.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingCustomSizeInGB)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listdiskofferings non-customized results with customSizeInGB > 0",
jsonResponseFile: "testdata/cmk_list_diskoffering_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingCustomSizeInGB)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "listdiskofferings non-customized results with customSizeInGB > 0",
jsonResponseFile: "testdata/cmk_list_diskoffering_singular_customized.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "listdiskofferings throw exception",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceID)
},
cmkResponseError: errors.New("cmk calling return exception"),
wantErr: true,
wantResultCount: 0,
},
{
testName: "listdiskofferings json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "diskofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneID, diskOfferingResourceName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "validatetemplate success on name filter",
jsonResponseFile: "testdata/cmk_list_template_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceName)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "validatetemplate failure when passing invalid profile",
jsonResponseFile: "testdata/cmk_list_template_singular.json",
argumentsExecCall: nil,
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, "xxx", domainID, zoneID, accountName, resourceName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "validatetemplate success on id filter",
jsonResponseFile: "testdata/cmk_list_template_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceID)
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "validatetemplate failure on multiple results",
jsonResponseFile: "testdata/cmk_list_template_multiple.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "validatetemplate failure on none results",
jsonResponseFile: "testdata/cmk_list_template_none.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceID)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 1,
},
{
testName: "validatetemplate failure on cmk failure",
jsonResponseFile: "testdata/cmk_list_template_none.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceID)
},
cmkResponseError: errors.New("cmk calling return exception"),
wantErr: true,
wantResultCount: 1,
},
{
testName: "validatetemplate no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "validatetemplate json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneID), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainID, zoneID, accountName, resourceName)
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "searchtemplate success on name filter",
jsonResponseFile: "testdata/cmk_list_template_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.SearchTemplate(ctx, execConfig.Profiles[0].Name, resourceName)
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "searchtemplate success on id filter",
jsonResponseFile: "testdata/cmk_list_template_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceID.Id),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.SearchTemplate(ctx, execConfig.Profiles[0].Name, resourceID)
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "searchtemplate on none results",
jsonResponseFile: "testdata/cmk_list_template_none.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.SearchTemplate(ctx, execConfig.Profiles[0].Name, resourceName)
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "searchtemplate failure on cmk failure",
jsonResponseFile: "testdata/cmk_list_template_none.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.SearchTemplate(ctx, execConfig.Profiles[0].Name, resourceName)
return err
},
cmkResponseError: errors.New("cmk calling return exception"),
wantErr: true,
wantResultCount: 1,
},
{
testName: "searchtemplate no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.SearchTemplate(ctx, execConfig.Profiles[0].Name, resourceName)
return err
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 0,
},
{
testName: "searchtemplate json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
_, err := cmk.SearchTemplate(ctx, execConfig.Profiles[0].Name, resourceName)
return err
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listaffinitygroups success on id filter",
jsonResponseFile: "testdata/cmk_list_affinitygroup_singular.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "affinitygroups", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainID, accountName, []string{resourceID.Id})
},
cmkResponseError: nil,
wantErr: false,
wantResultCount: 1,
},
{
testName: "listaffinitygroups no results",
jsonResponseFile: "testdata/cmk_list_empty_response.json",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "affinitygroups", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainID, accountName, []string{resourceID.Id})
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
{
testName: "listaffinitygroups json parse exception",
jsonResponseFile: "testdata/cmk_non_json_response.txt",
argumentsExecCall: []string{
"-c", configFilePath,
"list", "affinitygroups", fmt.Sprintf("id=\"%s\"", resourceID.Id), fmt.Sprintf("domainid=\"%s\"", domainID), fmt.Sprintf("account=\"%s\"", accountName),
},
cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainID, accountName, []string{resourceID.Id})
},
cmkResponseError: nil,
wantErr: true,
wantResultCount: 0,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
if tt.argumentsExecCall != nil {
executable.EXPECT().Execute(ctx, tt.argumentsExecCall).
Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError)
}
cmk, _ := executables.NewCmk(executable, writer, execConfig)
err := tt.cmkFunc(*cmk, ctx)
if tt.wantErr && err != nil || !tt.wantErr && err == nil {
return
}
t.Fatalf("Cmk error: %v", err)
})
}
}
func TestCmkGetManagementApiEndpoint(t *testing.T) {
_, writer := test.NewWriter(t)
mockCtrl := gomock.NewController(t)
tt := NewWithT(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
cmk, _ := executables.NewCmk(executable, writer, execConfigWithMultipleProfiles)
endpoint, err := cmk.GetManagementApiEndpoint("test_name")
tt.Expect(err).To(BeNil())
tt.Expect(endpoint).To(Equal("http://1.1.1.1:8080/client/api"))
endpoint, err = cmk.GetManagementApiEndpoint("test_name_2")
tt.Expect(err).To(BeNil())
tt.Expect(endpoint).To(Equal("http://1.1.1.1:8080/client/api_2"))
_, err = cmk.GetManagementApiEndpoint("xxx")
tt.Expect(err).NotTo(BeNil())
}
| 1,047 |
eks-anywhere | aws | Go | package executables
import (
"bytes"
"context"
)
type commandRunner interface {
Run(cmd *Command) (stdout bytes.Buffer, err error)
}
type Command struct {
commandRunner commandRunner
ctx context.Context
args []string
stdIn []byte
envVars map[string]string
}
func NewCommand(ctx context.Context, commandRunner commandRunner, args ...string) *Command {
return &Command{
commandRunner: commandRunner,
ctx: ctx,
args: args,
}
}
func (c *Command) WithEnvVars(envVars map[string]string) *Command {
c.envVars = envVars
return c
}
func (c *Command) WithStdIn(stdIn []byte) *Command {
c.stdIn = stdIn
return c
}
func (c *Command) Run() (out bytes.Buffer, err error) {
return c.commandRunner.Run(c)
}
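// Illustrative usage sketch (not part of the original source): building and running
// a command with the fluent helpers above. The runner value and arguments are
// hypothetical; any commandRunner implementation works here.
//
//	cmd := NewCommand(ctx, runner, "get", "pods").
//		WithEnvVars(map[string]string{"KUBECONFIG": "/tmp/kubeconfig"}).
//		WithStdIn([]byte("input"))
//	stdout, err := cmd.Run()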
| 41 |
eks-anywhere | aws | Go | package executables
const (
generatedDir = "generated"
overridesDir = "overrides"
)
| 7 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/aws/eks-anywhere/pkg/logger"
)
// Temporary: the curated packages dev and prod registry accounts are hard-coded
// because there is currently no mechanism to discover these values.
const (
dockerPath = "docker"
defaultRegistry = "public.ecr.aws"
packageProdDomain = "783794618700.dkr.ecr.us-west-2.amazonaws.com"
packageDevDomain = "857151390494.dkr.ecr.us-west-2.amazonaws.com"
)
type Docker struct {
Executable
}
func NewDocker(executable Executable) *Docker {
return &Docker{Executable: executable}
}
func (d *Docker) GetDockerLBPort(ctx context.Context, clusterName string) (port string, err error) {
clusterLBName := fmt.Sprintf("%s-lb", clusterName)
if stdout, err := d.Execute(ctx, "port", clusterLBName, "6443/tcp"); err != nil {
return "", err
} else {
return strings.Split(stdout.String(), ":")[1], nil
}
}
func (d *Docker) PullImage(ctx context.Context, image string) error {
logger.V(2).Info("Pulling docker image", "image", image)
if _, err := d.Execute(ctx, "pull", image); err != nil {
return err
} else {
return nil
}
}
func (d *Docker) Version(ctx context.Context) (int, error) {
cmdOutput, err := d.Execute(ctx, "version", "--format", "{{.Client.Version}}")
if err != nil {
return 0, fmt.Errorf("please check if docker is installed and running %v", err)
}
dockerVersion := strings.TrimSpace(cmdOutput.String())
versionSplit := strings.Split(dockerVersion, ".")
installedMajorVersion := versionSplit[0]
installedMajorVersionInt, err := strconv.Atoi(installedMajorVersion)
if err != nil {
return 0, err
}
return installedMajorVersionInt, nil
}
func (d *Docker) AllocatedMemory(ctx context.Context) (uint64, error) {
cmdOutput, err := d.Execute(ctx, "info", "--format", "'{{json .MemTotal}}'")
if err != nil {
return 0, fmt.Errorf("please check if docker is installed and running %v", err)
}
totalMemory := cmdOutput.String()
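// The --format '{{json .MemTotal}}' output is wrapped in single quotes and ends with a
// newline (e.g. '12345'\n), so strip the first character and the last two before parsing.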
totalMemory = totalMemory[1 : len(totalMemory)-2]
return strconv.ParseUint(totalMemory, 10, 64)
}
func (d *Docker) TagImage(ctx context.Context, image string, endpoint string) error {
replacer := strings.NewReplacer(defaultRegistry, endpoint, packageProdDomain, endpoint, packageDevDomain, endpoint)
localImage := replacer.Replace(image)
logger.Info("Tagging image", "image", image, "local image", localImage)
if _, err := d.Execute(ctx, "tag", image, localImage); err != nil {
return err
}
return nil
}
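// Illustrative sketch (not part of the original source) of the registry rewrite
// performed by TagImage and PushImage. The image and endpoint values are hypothetical:
//
//	image:    public.ecr.aws/eks-anywhere/cli-tools:v1
//	endpoint: registry.example.com:8443
//	local:    registry.example.com:8443/eks-anywhere/cli-tools:v1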
func (d *Docker) PushImage(ctx context.Context, image string, endpoint string) error {
replacer := strings.NewReplacer(defaultRegistry, endpoint, packageProdDomain, endpoint, packageDevDomain, endpoint)
localImage := replacer.Replace(image)
logger.Info("Pushing", "image", localImage)
if _, err := d.Execute(ctx, "push", localImage); err != nil {
return err
}
return nil
}
func (d *Docker) Login(ctx context.Context, endpoint, username, password string) error {
params := []string{"login", endpoint, "--username", username, "--password-stdin"}
logger.Info(fmt.Sprintf("Logging in to docker registry %s", endpoint))
_, err := d.ExecuteWithStdin(ctx, []byte(password), params...)
return err
}
func (d *Docker) LoadFromFile(ctx context.Context, filepath string) error {
if _, err := d.Execute(ctx, "load", "-i", filepath); err != nil {
return fmt.Errorf("loading images from file: %v", err)
}
return nil
}
func (d *Docker) SaveToFile(ctx context.Context, filepath string, images ...string) error {
params := make([]string, 0, 3+len(images))
params = append(params, "save", "-o", filepath)
params = append(params, images...)
if _, err := d.Execute(ctx, params...); err != nil {
return fmt.Errorf("saving images to file: %v", err)
}
return nil
}
func (d *Docker) Run(ctx context.Context, image string, name string, cmd []string, flags ...string) error {
params := []string{"run", "-d", "-i"}
params = append(params, flags...)
params = append(params, "--name", name, image)
params = append(params, cmd...)
if _, err := d.Execute(ctx, params...); err != nil {
return fmt.Errorf("running docker container %s with image %s: %v", name, image, err)
}
return nil
}
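// Illustrative sketch (not part of the original source): Run assembles the docker CLI
// invocation as "run -d -i <flags...> --name <name> <image> <cmd...>". With hypothetical
// values this would execute:
//
//	d.Run(ctx, "alpine:3.18", "helper", []string{"sleep", "60"}, "--rm")
//	// -> docker run -d -i --rm --name helper alpine:3.18 sleep 60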
func (d *Docker) ForceRemove(ctx context.Context, name string) error {
params := []string{"rm", "-f", name}
if _, err := d.Execute(ctx, params...); err != nil {
return fmt.Errorf("force removing docker container %s: %v", name, err)
}
return nil
}
// CheckContainerExistence checks whether a Docker container with the provided name exists.
// It returns true if a container with the name exists, false if it doesn't, and an error if it encounters some other error.
func (d *Docker) CheckContainerExistence(ctx context.Context, name string) (bool, error) {
params := []string{"container", "inspect", name}
_, err := d.Execute(ctx, params...)
if err == nil {
return true, nil
} else if strings.Contains(err.Error(), "No such container") {
return false, nil
}
return false, fmt.Errorf("checking if a docker container with name %s exists: %v", name, err)
}
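// Illustrative usage sketch (not part of the original source), assuming a ctx and a
// Docker built with NewDocker; "helper" is a hypothetical container name:
//
//	exists, err := d.CheckContainerExistence(ctx, "helper")
//	if err != nil {
//		return err
//	}
//	if exists {
//		return d.ForceRemove(ctx, "helper")
//	}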
| 154 |
eks-anywhere | aws | Go | package executables
import "context"
type DockerContainer interface {
Init(ctx context.Context) error
Close(ctx context.Context) error
ContainerName() string
}
func NewDockerExecutableBuilder(dockerContainer DockerContainer) *dockerExecutableBuilder {
return &dockerExecutableBuilder{
container: dockerContainer,
}
}
type dockerExecutableBuilder struct {
container DockerContainer
}
func (d *dockerExecutableBuilder) Build(binaryName string) Executable {
return NewDockerExecutable(binaryName, d.container.ContainerName())
}
func (b *dockerExecutableBuilder) Init(ctx context.Context) (Closer, error) {
return b.container.Close, b.container.Init(ctx)
}
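// Illustrative lifecycle sketch (not part of the original source), assuming some
// DockerContainer implementation named container:
//
//	b := NewDockerExecutableBuilder(container)
//	closer, err := b.Init(ctx) // starts the long-running container
//	if err != nil {
//		return err
//	}
//	defer closer(ctx)
//	kubectl := b.Build("kubectl") // commands run via "docker exec" in the container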
| 28 |
eks-anywhere | aws | Go | package executables
import (
"bytes"
"context"
"fmt"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type DockerClient interface {
Login(ctx context.Context, endpoint, username, password string) error
PullImage(ctx context.Context, image string) error
Execute(ctx context.Context, args ...string) (stdout bytes.Buffer, err error)
}
type dockerContainer struct {
image string
workingDir string
mountDirs []string
containerName string
dockerClient DockerClient
initOnce, closeOnce sync.Once
*retrier.Retrier
}
func newDockerContainer(image, workingDir string, mountDirs []string, dockerClient DockerClient) *dockerContainer {
return &dockerContainer{
image: image,
workingDir: workingDir,
mountDirs: mountDirs,
containerName: containerNamePrefix + strconv.FormatInt(time.Now().UnixNano(), 10),
dockerClient: dockerClient,
Retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
}
}
func NewDockerContainerCustomBinary(docker DockerClient) *dockerContainer {
return &dockerContainer{
dockerClient: docker,
}
}
func (d *dockerContainer) Init(ctx context.Context) error {
var err error
d.initOnce.Do(func() {
err = d.Retry(func() error {
return d.dockerClient.PullImage(ctx, d.image)
})
if err != nil {
return
}
var absWorkingDir string
absWorkingDir, err = filepath.Abs(d.workingDir)
if err != nil {
err = fmt.Errorf("getting abs path for mount dir: %v", err)
return
}
params := []string{"run", "-d", "--name", d.containerName, "--network", "host", "-w", absWorkingDir, "-v", "/var/run/docker.sock:/var/run/docker.sock"}
for _, m := range d.mountDirs {
var absMountDir string
absMountDir, err = filepath.Abs(m)
if err != nil {
err = fmt.Errorf("getting abs path for mount dir: %v", err)
return
}
params = append(params, "-v", fmt.Sprintf("%[1]s:%[1]s", absMountDir))
}
// Start the container and keep it running in the background.
logger.V(3).Info("Initializing long running container", "name", d.containerName, "image", d.image)
params = append(params, "--entrypoint", "sleep", d.image, "infinity")
_, err = d.dockerClient.Execute(ctx, params...)
})
return err
}
func (d *dockerContainer) ContainerName() string {
return d.containerName
}
func (d *dockerContainer) Close(ctx context.Context) error {
if d == nil {
return nil
}
var err error
d.closeOnce.Do(func() {
logger.V(3).Info("Cleaning up long running container", "name", d.containerName)
_, err = d.dockerClient.Execute(ctx, "rm", "-f", "-v", d.containerName)
})
return err
}
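// Illustrative sketch (not part of the original source) of the invocation Init issues,
// with hypothetical image and directories:
//
//	docker run -d --name eksa_<nanotimestamp> --network host -w /abs/workdir \
//	  -v /var/run/docker.sock:/var/run/docker.sock -v /abs/mnt:/abs/mnt \
//	  --entrypoint sleep <image> infinity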
| 104 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type dockerContainerTest struct {
*WithT
ctx context.Context
c *mocks.MockDockerClient
}
func newDockerContainerTest(t *testing.T) *dockerContainerTest {
ctrl := gomock.NewController(t)
c := mocks.NewMockDockerClient(ctrl)
return &dockerContainerTest{
WithT: NewWithT(t),
ctx: context.Background(),
c: c,
}
}
func TestDockerContainerInit(t *testing.T) {
g := newDockerContainerTest(t)
g.c.EXPECT().PullImage(g.ctx, "").Return(nil)
g.c.EXPECT().Execute(g.ctx, gomock.Any()).Return(bytes.Buffer{}, nil)
d := executables.NewDockerContainerCustomBinary(g.c)
g.Expect(d.Init(context.Background())).To(Succeed())
}
func TestDockerContainerInitErrorPullImage(t *testing.T) {
g := newDockerContainerTest(t)
g.c.EXPECT().PullImage(g.ctx, "").Return(errors.New("error in pull")).Times(5)
d := executables.NewDockerContainerCustomBinary(g.c)
d.Retrier = retrier.NewWithMaxRetries(5, 0)
g.Expect(d.Init(context.Background())).To(MatchError(ContainSubstring("error in pull")))
}
| 48 |
eks-anywhere | aws | Go | package executables
import (
"bytes"
"context"
"fmt"
)
const containerNamePrefix = "eksa_"
type linuxDockerExecutable struct {
cli string
containerName string
}
// NewDockerExecutable currently returns a linuxDockerExecutable, but if we support other
// types of docker executables we can rename this constructor.
func NewDockerExecutable(cli string, containerName string) Executable {
return &linuxDockerExecutable{
cli: cli,
containerName: containerName,
}
}
func (e *linuxDockerExecutable) Execute(ctx context.Context, args ...string) (stdout bytes.Buffer, err error) {
return e.Command(ctx, args...).Run()
}
func (e *linuxDockerExecutable) ExecuteWithStdin(ctx context.Context, in []byte, args ...string) (stdout bytes.Buffer, err error) {
return e.Command(ctx, args...).WithStdIn(in).Run()
}
func (e *linuxDockerExecutable) ExecuteWithEnv(ctx context.Context, envs map[string]string, args ...string) (stdout bytes.Buffer, err error) {
return e.Command(ctx, args...).WithEnvVars(envs).Run()
}
func (e *linuxDockerExecutable) Command(ctx context.Context, args ...string) *Command {
return NewCommand(ctx, e, args...)
}
func (e *linuxDockerExecutable) Run(cmd *Command) (stdout bytes.Buffer, err error) {
return execute(cmd.ctx, "docker", cmd.stdIn, cmd.envVars, e.buildCommand(cmd.envVars, e.cli, cmd.args...)...)
}
func (e *linuxDockerExecutable) buildCommand(envs map[string]string, cli string, args ...string) []string {
var envVars []string
for k, v := range envs {
envVars = append(envVars, "-e", fmt.Sprintf("%s=%s", k, v))
}
dockerCommands := []string{"exec", "-i"}
dockerCommands = append(dockerCommands, envVars...)
dockerCommands = append(dockerCommands, e.containerName, e.cli)
dockerCommands = append(dockerCommands, args...)
return dockerCommands
}
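// Illustrative sketch (not part of the original source) of the command buildCommand
// produces for hypothetical inputs (cli "kubectl", args "get pods", env FOO=bar):
//
//	docker exec -i -e FOO=bar <containerName> kubectl get pods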
| 59 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"fmt"
"reflect"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
)
func TestGetDockerLBPort(t *testing.T) {
clusterName := "clusterName"
wantPort := "test:port"
clusterLBName := fmt.Sprintf("%s-lb", clusterName)
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, []string{"port", clusterLBName, "6443/tcp"}).Return(*bytes.NewBufferString(wantPort), nil)
d := executables.NewDocker(executable)
_, err := d.GetDockerLBPort(ctx, clusterName)
if err != nil {
t.Fatalf("Docker.GetDockerLBPort() error = %v, want nil", err)
}
}
func TestDockerPullImage(t *testing.T) {
image := "test_image"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "pull", image).Return(bytes.Buffer{}, nil)
d := executables.NewDocker(executable)
err := d.PullImage(ctx, image)
if err != nil {
t.Fatalf("Docker.PullImage() error = %v, want nil", err)
}
}
func TestDockerVersion(t *testing.T) {
version := "1.234"
wantVersion := 1
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "version", "--format", "{{.Client.Version}}").Return(*bytes.NewBufferString(version), nil)
d := executables.NewDocker(executable)
v, err := d.Version(ctx)
if err != nil {
t.Fatalf("Docker.Version() error = %v, want nil", err)
}
if !reflect.DeepEqual(v, wantVersion) {
t.Fatalf("Docker.Version() version = %v, want %v", v, wantVersion)
}
}
func TestDockerAllocatedMemory(t *testing.T) {
memory := "12345"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "info", "--format", "'{{json .MemTotal}}'").Return(*bytes.NewBufferString(memory), nil)
d := executables.NewDocker(executable)
mem, err := d.AllocatedMemory(ctx)
if err != nil {
t.Fatalf("Docker.AllocatedMemory() error = %v, want %v", err, mem)
}
}
func TestDockerLoadFromFile(t *testing.T) {
file := "file"
g := NewWithT(t)
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "load", "-i", file).Return(bytes.Buffer{}, nil)
d := executables.NewDocker(executable)
g.Expect(d.LoadFromFile(ctx, file)).To(Succeed())
}
func TestDockerSaveToFileMultipleImages(t *testing.T) {
file := "file"
image1 := "image1:tag1"
image2 := "image2:tag2"
image3 := "image3:tag3"
g := NewWithT(t)
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "save", "-o", file, image1, image2, image3).Return(bytes.Buffer{}, nil)
d := executables.NewDocker(executable)
g.Expect(d.SaveToFile(ctx, file, image1, image2, image3)).To(Succeed())
}
func TestDockerSaveToFileNoImages(t *testing.T) {
file := "file"
g := NewWithT(t)
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "save", "-o", file).Return(bytes.Buffer{}, nil)
d := executables.NewDocker(executable)
g.Expect(d.SaveToFile(ctx, file)).To(Succeed())
}
func TestDockerRunBasicSuccess(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "run", "-d", "-i", "--name", "basic_test", "basic_test:latest")
if err := d.Run(ctx, "basic_test:latest", "basic_test", []string{}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestDockerRunWithCmdSuccess(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "run", "-d", "-i", "--name", "basic_test", "basic_test:latest", "foo", "bar")
if err := d.Run(ctx, "basic_test:latest", "basic_test", []string{"foo", "bar"}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestDockerRunWithFlagsSuccess(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "run", "-d", "-i", "--flag1", "--flag2", "--name", "basic_test", "basic_test:latest")
if err := d.Run(ctx, "basic_test:latest", "basic_test", []string{}, "--flag1", "--flag2"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestDockerRunWithCmdAndFlagsSuccess(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "run", "-d", "-i", "--flag1", "--flag2", "--name", "basic_test", "basic_test:latest", "foo", "bar")
if err := d.Run(ctx, "basic_test:latest", "basic_test", []string{"foo", "bar"}, "--flag1", "--flag2"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestDockerRunFailure(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
name := "basic_test"
image := "basic_test:latest"
dockerRunError := "docker run error"
expectedError := fmt.Sprintf("running docker container %s with image %s: %s", name, image, dockerRunError)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "run", "-d", "-i", "--name", name, image).Return(bytes.Buffer{}, errors.New(dockerRunError))
err := d.Run(ctx, image, name, []string{})
assert.EqualError(t, err, expectedError, "Error should be: %v, got: %v", expectedError, err)
}
func TestDockerForceRemoveSuccess(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "rm", "-f", "basic_test")
if err := d.ForceRemove(ctx, "basic_test"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestDockerForceRemoveFailure(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
name := "basic_test"
dockerForceRemoveError := "docker force remove error"
expectedError := fmt.Sprintf("force removing docker container %s: %s", name, dockerForceRemoveError)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "rm", "-f", name).Return(bytes.Buffer{}, errors.New(dockerForceRemoveError))
err := d.ForceRemove(ctx, name)
assert.EqualError(t, err, expectedError, "Error should be: %v, got: %v", expectedError, err)
}
func TestDockerCheckContainerExistenceExists(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
name := "basic_test"
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "container", "inspect", name).Return(bytes.Buffer{}, nil)
exists, err := d.CheckContainerExistence(ctx, name)
assert.True(t, exists)
assert.Nil(t, err)
}
func TestDockerCheckContainerExistenceDoesNotExist(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
name := "basic_test"
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "container", "inspect", name).Return(bytes.Buffer{}, fmt.Errorf("Error: No such container: %s", name))
exists, err := d.CheckContainerExistence(ctx, name)
assert.False(t, exists)
assert.Nil(t, err)
}
func TestDockerCheckContainerExistenceOtherError(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
name := "basic_test"
dockerError := "An unexpected error occured"
expectedError := fmt.Sprintf("checking if a docker container with name %s exists: %s", name, dockerError)
executable := mockexecutables.NewMockExecutable(mockCtrl)
d := executables.NewDocker(executable)
executable.EXPECT().Execute(ctx, "container", "inspect", name).Return(bytes.Buffer{}, errors.New(dockerError))
exists, err := d.CheckContainerExistence(ctx, name)
assert.False(t, exists)
assert.EqualError(t, err, expectedError, "Error should be: %v, got: %v", expectedError, err)
}
| 284 |
eks-anywhere | aws | Go | package executables
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
const (
redactMask = "*****"
)
var redactedEnvKeys = []string{
constants.VSphereUsernameKey,
constants.VSpherePasswordKey,
constants.GovcUsernameKey,
constants.GovcPasswordKey,
decoder.CloudStackCloudConfigB64SecretKey,
eksaGithubTokenEnv,
githubTokenEnv,
config.EksaAccessKeyIdEnv,
config.EksaSecretAccessKeyEnv,
config.AwsAccessKeyIdEnv,
config.AwsSecretAccessKeyEnv,
constants.SnowCredentialsKey,
constants.SnowCertsKey,
constants.NutanixUsernameKey,
constants.NutanixPasswordKey,
constants.RegistryUsername,
constants.RegistryPassword,
}
type executable struct {
cli string
}
type Executable interface {
Execute(ctx context.Context, args ...string) (stdout bytes.Buffer, err error)
ExecuteWithEnv(ctx context.Context, envs map[string]string, args ...string) (stdout bytes.Buffer, err error) // TODO: remove this from interface in favor of Command
ExecuteWithStdin(ctx context.Context, in []byte, args ...string) (stdout bytes.Buffer, err error) // TODO: remove this from interface in favor of Command
Command(ctx context.Context, args ...string) *Command
Run(cmd *Command) (stdout bytes.Buffer, err error)
}
// NewExecutable should only be called through the executables builder.
func NewExecutable(cli string) Executable {
return &executable{
cli: cli,
}
}
func (e *executable) Execute(ctx context.Context, args ...string) (stdout bytes.Buffer, err error) {
return e.Command(ctx, args...).Run()
}
func (e *executable) ExecuteWithStdin(ctx context.Context, in []byte, args ...string) (stdout bytes.Buffer, err error) {
return e.Command(ctx, args...).WithStdIn(in).Run()
}
func (e *executable) ExecuteWithEnv(ctx context.Context, envs map[string]string, args ...string) (stdout bytes.Buffer, err error) {
return e.Command(ctx, args...).WithEnvVars(envs).Run()
}
func (e *executable) Command(ctx context.Context, args ...string) *Command {
return NewCommand(ctx, e, args...)
}
func (e *executable) Run(cmd *Command) (stdout bytes.Buffer, err error) {
for k, v := range cmd.envVars {
os.Setenv(k, v)
}
return execute(cmd.ctx, e.cli, cmd.stdIn, cmd.envVars, cmd.args...)
}
func (e *executable) Close(ctx context.Context) error {
return nil
}
func RedactCreds(cmd string, envMap map[string]string) string {
redactedEnvs := []string{}
for _, redactedEnvKey := range redactedEnvKeys {
if env, found := os.LookupEnv(redactedEnvKey); found && env != "" {
redactedEnvs = append(redactedEnvs, env)
} else if env, found := envMap[redactedEnvKey]; found && env != "" {
redactedEnvs = append(redactedEnvs, env)
}
}
for _, redactedEnv := range redactedEnvs {
cmd = strings.ReplaceAll(cmd, redactedEnv, redactMask)
}
return cmd
}
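// Illustrative sketch (not part of the original source), with hypothetical values; the
// secret can come from the process environment or from the supplied env map:
//
//	envMap := map[string]string{constants.GovcPasswordKey: "s3cret"}
//	RedactCreds("govc login with s3cret", envMap)
//	// -> "govc login with *****"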
func execute(ctx context.Context, cli string, in []byte, envVars map[string]string, args ...string) (stdout bytes.Buffer, err error) {
var stderr bytes.Buffer
cmd := exec.CommandContext(ctx, cli, args...)
logger.V(6).Info("Executing command", "cmd", RedactCreds(cmd.String(), envVars))
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if len(in) != 0 {
cmd.Stdin = bytes.NewReader(in)
}
err = cmd.Run()
if err != nil {
if stderr.Len() > 0 {
if logger.MaxLogging() {
logger.V(logger.MaxLogLevel).Info(cli, "stderr", stderr.String())
}
return stdout, errors.New(stderr.String())
} else {
if !logger.MaxLogging() {
logger.V(8).Info(cli, "stdout", stdout.String())
logger.V(8).Info(cli, "stderr", stderr.String())
}
return stdout, errors.New(fmt.Sprint(err))
}
}
if !logger.MaxLogging() {
logger.V(8).Info(cli, "stdout", stdout.String())
logger.V(8).Info(cli, "stderr", stderr.String())
}
return stdout, nil
}
| 135 |
eks-anywhere | aws | Go | package executables_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
)
func TestRedactCreds(t *testing.T) {
str := "My username is username123. My password is password456"
t.Setenv(constants.VSphereUsernameKey, "username123")
os.Unsetenv(constants.VSpherePasswordKey)
os.Unsetenv("var")
envMap := map[string]string{"var": "value", constants.VSpherePasswordKey: "password456"}
expected := "My username is *****. My password is *****"
redactedStr := executables.RedactCreds(str, envMap)
if redactedStr != expected {
t.Fatalf("executables.RedactCreds expected = %s, got = %s", expected, redactedStr)
}
}
| 25 |
eks-anywhere | aws | Go | package executables
import (
"context"
"time"
)
func KubectlWaitRetryPolicy(k *Kubectl, totalRetries int, err error) (retry bool, wait time.Duration) {
return k.kubectlWaitRetryPolicy(totalRetries, err)
}
func CallKubectlPrivateWait(k *Kubectl, ctx context.Context, kubeconfig string, timeoutTime time.Time, forCondition string, property string, namespace string) error {
return k.wait(ctx, kubeconfig, timeoutTime, forCondition, property, namespace)
}
| 15 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/git/providers/github"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
fluxPath = "flux"
eksaGithubTokenEnv = "EKSA_GITHUB_TOKEN"
githubTokenEnv = "GITHUB_TOKEN"
githubProvider = "github"
gitProvider = "git"
defaultPrivateKeyAlgorithm = "ecdsa"
)
type Flux struct {
Executable
}
func NewFlux(executable Executable) *Flux {
return &Flux{
Executable: executable,
}
}
// BootstrapGithub creates the GitHub repository if it doesn’t exist, and commits the toolkit
// components manifests to the main branch. Then it configures the target cluster to synchronize with the repository.
// If the toolkit components are present on the cluster, the bootstrap command will perform an upgrade if needed.
func (f *Flux) BootstrapGithub(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
c := fluxConfig.Spec
params := []string{
"bootstrap",
githubProvider,
"--repository", c.Github.Repository,
"--owner", c.Github.Owner,
"--path", c.ClusterConfigPath,
"--ssh-key-algorithm", defaultPrivateKeyAlgorithm,
}
params = setUpCommonParamsBootstrap(cluster, fluxConfig, params)
if c.Github.Personal {
params = append(params, "--personal")
}
token, err := github.GetGithubAccessTokenFromEnv()
if err != nil {
return fmt.Errorf("setting token env: %v", err)
}
env := make(map[string]string)
env[githubTokenEnv] = token
_, err = f.ExecuteWithEnv(ctx, env, params...)
if err != nil {
return fmt.Errorf("executing flux bootstrap github: %v", err)
}
return nil
}
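// Illustrative sketch (not part of the original source) of the CLI invocation
// BootstrapGithub issues for a hypothetical personal repository, with GITHUB_TOKEN
// injected through the environment:
//
//	flux bootstrap github --repository fleet --owner janedoe --path clusters/mgmt \
//	  --ssh-key-algorithm ecdsa --kubeconfig mgmt.kubeconfig --personal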
// BootstrapGit commits the toolkit components manifests to the branch of a Git repository.
// It then configures the target cluster to synchronize with the repository. If the toolkit components are present on the cluster, the
// bootstrap command will perform an upgrade if needed.
func (f *Flux) BootstrapGit(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig, cliConfig *config.CliConfig) error {
c := fluxConfig.Spec
params := []string{
"bootstrap",
gitProvider,
"--url", c.Git.RepositoryUrl,
"--path", c.ClusterConfigPath,
"--private-key-file", cliConfig.GitPrivateKeyFile,
"--silent",
}
params = setUpCommonParamsBootstrap(cluster, fluxConfig, params)
if fluxConfig.Spec.Git.SshKeyAlgorithm != "" {
params = append(params, "--ssh-key-algorithm", fluxConfig.Spec.Git.SshKeyAlgorithm)
} else {
params = append(params, "--ssh-key-algorithm", defaultPrivateKeyAlgorithm)
}
if cliConfig.GitSshKeyPassphrase != "" {
params = append(params, "--password", cliConfig.GitSshKeyPassphrase)
}
env := make(map[string]string)
env["SSH_KNOWN_HOSTS"] = cliConfig.GitKnownHostsFile
_, err := f.ExecuteWithEnv(ctx, env, params...)
if err != nil {
return fmt.Errorf("executing flux bootstrap git: %v", err)
}
return nil
}
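// Illustrative sketch (not part of the original source) of the CLI invocation
// BootstrapGit issues for hypothetical values, with SSH_KNOWN_HOSTS set in the
// environment from cliConfig.GitKnownHostsFile:
//
//	flux bootstrap git --url ssh://git@host/repo.git --path clusters/mgmt \
//	  --private-key-file /keys/id_ecdsa --silent --kubeconfig mgmt.kubeconfig \
//	  --ssh-key-algorithm ecdsa --password <passphrase>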
func setUpCommonParamsBootstrap(cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig, params []string) []string {
c := fluxConfig.Spec
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
if c.Branch != "" {
params = append(params, "--branch", c.Branch)
}
if c.SystemNamespace != "" {
params = append(params, "--namespace", c.SystemNamespace)
}
return params
}
func (f *Flux) Uninstall(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
c := fluxConfig.Spec
params := []string{
"uninstall",
"--silent",
}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
if c.SystemNamespace != "" {
params = append(params, "--namespace", c.SystemNamespace)
}
_, err := f.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("uninstalling flux: %v", err)
}
return nil
}
func (f *Flux) SuspendKustomization(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
c := fluxConfig.Spec
if c.SystemNamespace == "" {
return fmt.Errorf("executing flux suspend kustomization: namespace empty")
}
params := []string{"suspend", "ks", c.SystemNamespace, "--namespace", c.SystemNamespace}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := f.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("executing flux suspend kustomization: %v", err)
}
return nil
}
func (f *Flux) ResumeKustomization(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
c := fluxConfig.Spec
if c.SystemNamespace == "" {
return fmt.Errorf("executing flux resume kustomization: namespace empty")
}
params := []string{"resume", "ks", c.SystemNamespace, "--namespace", c.SystemNamespace}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := f.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("executing flux resume kustomization: %v", err)
}
return nil
}
func (f *Flux) Reconcile(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
c := fluxConfig.Spec
params := []string{"reconcile", "source", "git"}
if c.SystemNamespace != "" {
params = append(params, c.SystemNamespace, "--namespace", c.SystemNamespace)
} else {
params = append(params, "flux-system")
}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
if _, err := f.Execute(ctx, params...); err != nil {
return fmt.Errorf("executing flux reconcile: %v", err)
}
return nil
}
| 193 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"os"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
githubToken = "GITHUB_TOKEN"
eksaGithubTokenEnv = "EKSA_GITHUB_TOKEN"
validPATValue = "ghp_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
githubProvider = "github"
gitProvider = "git"
validPassword = "testPassword"
validPrivateKeyfilePath = "testdata/nonemptyprivatekey"
validGitKnownHostsFilePath = "testdata/known_hosts"
)
func setupFluxContext(t *testing.T) {
t.Setenv(eksaGithubTokenEnv, validPATValue)
t.Setenv(githubToken, os.Getenv(eksaGithubTokenEnv))
}
func TestFluxInstallGithubToolkitsSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupFluxContext(t)
owner := "janedoe"
repo := "gitops-fleet"
path := "clusters/cluster-name"
tests := []struct {
testName string
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
wantExecArgs []interface{}
}{
{
testName: "with kubeconfig",
cluster: &types.Cluster{
KubeconfigFile: "f.kubeconfig",
},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Github: &v1alpha1.GithubProviderConfig{
Owner: owner,
Repository: repo,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", githubProvider, "--repository", repo, "--owner", owner, "--path", path, "--ssh-key-algorithm", "ecdsa", "--kubeconfig", "f.kubeconfig",
},
},
{
testName: "with personal",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Github: &v1alpha1.GithubProviderConfig{
Owner: owner,
Repository: repo,
Personal: true,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", githubProvider, "--repository", repo, "--owner", owner, "--path", path, "--ssh-key-algorithm", "ecdsa", "--personal",
},
},
{
testName: "with branch",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Branch: "main",
Github: &v1alpha1.GithubProviderConfig{
Owner: owner,
Repository: repo,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", githubProvider, "--repository", repo, "--owner", owner, "--path", path, "--ssh-key-algorithm", "ecdsa", "--branch", "main",
},
},
{
testName: "with namespace",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
SystemNamespace: "flux-system",
Github: &v1alpha1.GithubProviderConfig{
Owner: owner,
Repository: repo,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", githubProvider, "--repository", repo, "--owner", owner, "--path", path, "--ssh-key-algorithm", "ecdsa", "--namespace", "flux-system",
},
},
{
testName: "minimum args",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"bootstrap", githubProvider, "--repository", "", "--owner", "", "--path", "", "--ssh-key-algorithm", "ecdsa",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
env := map[string]string{githubToken: validPATValue}
executable.EXPECT().ExecuteWithEnv(
ctx,
env,
tt.wantExecArgs...,
).Return(bytes.Buffer{}, nil)
f := executables.NewFlux(executable)
if err := f.BootstrapGithub(ctx, tt.cluster, tt.fluxConfig); err != nil {
t.Errorf("flux.BootstrapGithub() error = %v, want nil", err)
}
})
}
}
func TestFluxUninstallGitOpsToolkitsComponents(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupFluxContext(t)
tests := []struct {
testName string
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
wantExecArgs []interface{}
}{
{
testName: "minimum args",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{},
wantExecArgs: []interface{}{
"uninstall", "--silent",
},
},
{
testName: "with kubeconfig",
cluster: &types.Cluster{
KubeconfigFile: "f.kubeconfig",
},
fluxConfig: &v1alpha1.FluxConfig{},
wantExecArgs: []interface{}{
"uninstall", "--silent", "--kubeconfig", "f.kubeconfig",
},
},
{
testName: "with namespace",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"uninstall", "--silent", "--namespace", "flux-system",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(
ctx,
tt.wantExecArgs...,
).Return(bytes.Buffer{}, nil)
f := executables.NewFlux(executable)
if err := f.Uninstall(ctx, tt.cluster, tt.fluxConfig); err != nil {
t.Errorf("flux.Uninstall() error = %v, want nil", err)
}
})
}
}
func TestFluxPauseKustomization(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupFluxContext(t)
tests := []struct {
testName string
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
wantExecArgs []interface{}
}{
{
testName: "minimum args",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"suspend", "ks", "flux-system", "--namespace", "flux-system",
},
},
{
testName: "with kubeconfig",
cluster: &types.Cluster{
KubeconfigFile: "f.kubeconfig",
},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"suspend", "ks", "flux-system", "--namespace", "flux-system", "--kubeconfig", "f.kubeconfig",
},
},
{
testName: "with namespace",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "custom-ns",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"suspend", "ks", "custom-ns", "--namespace", "custom-ns",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(
ctx,
tt.wantExecArgs...,
).Return(bytes.Buffer{}, nil)
f := executables.NewFlux(executable)
if err := f.SuspendKustomization(ctx, tt.cluster, tt.fluxConfig); err != nil {
t.Errorf("flux.SuspendKustomization() error = %v, want nil", err)
}
})
}
}
func TestFluxResumeKustomization(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupFluxContext(t)
tests := []struct {
testName string
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
wantExecArgs []interface{}
}{
{
testName: "minimum args",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"resume", "ks", "flux-system", "--namespace", "flux-system",
},
},
{
testName: "with kubeconfig",
cluster: &types.Cluster{
KubeconfigFile: "f.kubeconfig",
},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "flux-system",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"resume", "ks", "flux-system", "--namespace", "flux-system", "--kubeconfig", "f.kubeconfig",
},
},
{
testName: "with namespace",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "custom-ns",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"resume", "ks", "custom-ns", "--namespace", "custom-ns",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(
ctx,
tt.wantExecArgs...,
).Return(bytes.Buffer{}, nil)
f := executables.NewFlux(executable)
if err := f.ResumeKustomization(ctx, tt.cluster, tt.fluxConfig); err != nil {
t.Errorf("flux.ResumeKustomization() error = %v, want nil", err)
}
})
}
}
func TestFluxReconcile(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupFluxContext(t)
tests := []struct {
testName string
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
wantExecArgs []interface{}
}{
{
testName: "minimum args",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{},
wantExecArgs: []interface{}{
"reconcile", "source", "git", "flux-system",
},
},
{
testName: "with kubeconfig",
cluster: &types.Cluster{
KubeconfigFile: "f.kubeconfig",
},
fluxConfig: &v1alpha1.FluxConfig{},
wantExecArgs: []interface{}{
"reconcile", "source", "git", "flux-system", "--kubeconfig", "f.kubeconfig",
},
},
{
testName: "with custom namespace",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
SystemNamespace: "custom-ns",
Github: &v1alpha1.GithubProviderConfig{},
},
},
wantExecArgs: []interface{}{
"reconcile", "source", "git", "custom-ns", "--namespace", "custom-ns",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(
ctx,
tt.wantExecArgs...,
).Return(bytes.Buffer{}, nil)
f := executables.NewFlux(executable)
if err := f.Reconcile(ctx, tt.cluster, tt.fluxConfig); err != nil {
t.Errorf("flux.Reconcile() error = %v, want nil", err)
}
})
}
}
func TestFluxInstallGitToolkitsSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
repoUrl := "ssh://[email protected]/repository.git"
path := "clusters/cluster-name"
privateKeyFilePath := validPrivateKeyfilePath
password := validPassword
envmap := map[string]string{"SSH_KNOWN_HOSTS": validGitKnownHostsFilePath}
tests := []struct {
testName string
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
wantExecArgs []interface{}
cliConfig *config.CliConfig
}{
{
testName: "with kubeconfig",
cluster: &types.Cluster{
KubeconfigFile: "f.kubeconfig",
},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Git: &v1alpha1.GitProviderConfig{
RepositoryUrl: repoUrl,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", gitProvider, "--url", repoUrl, "--path", path, "--private-key-file", privateKeyFilePath, "--silent", "--kubeconfig", "f.kubeconfig", "--ssh-key-algorithm", "ecdsa", "--password", password,
},
cliConfig: &config.CliConfig{
GitSshKeyPassphrase: validPassword,
GitPrivateKeyFile: validPrivateKeyfilePath,
GitKnownHostsFile: validGitKnownHostsFilePath,
},
},
{
testName: "with branch",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Branch: "main",
Git: &v1alpha1.GitProviderConfig{
RepositoryUrl: repoUrl,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", gitProvider, "--url", repoUrl, "--path", path, "--private-key-file", privateKeyFilePath, "--silent", "--branch", "main",
"--ssh-key-algorithm", "ecdsa",
},
cliConfig: &config.CliConfig{
GitSshKeyPassphrase: "",
GitPrivateKeyFile: validPrivateKeyfilePath,
GitKnownHostsFile: validGitKnownHostsFilePath,
},
},
{
testName: "with namespace",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
SystemNamespace: "flux-system",
Git: &v1alpha1.GitProviderConfig{
RepositoryUrl: repoUrl,
},
},
},
wantExecArgs: []interface{}{
"bootstrap", gitProvider, "--url", repoUrl, "--path", path, "--private-key-file", privateKeyFilePath, "--silent", "--namespace", "flux-system",
"--ssh-key-algorithm", "ecdsa", "--password", password,
},
cliConfig: &config.CliConfig{
GitSshKeyPassphrase: validPassword,
GitPrivateKeyFile: validPrivateKeyfilePath,
GitKnownHostsFile: validGitKnownHostsFilePath,
},
},
{
testName: "with ssh key algorithm",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
ClusterConfigPath: path,
Git: &v1alpha1.GitProviderConfig{
RepositoryUrl: repoUrl,
SshKeyAlgorithm: "rsa",
},
},
},
wantExecArgs: []interface{}{
"bootstrap", gitProvider, "--url", repoUrl, "--path", path, "--private-key-file", privateKeyFilePath, "--silent",
"--ssh-key-algorithm", "rsa", "--password", password,
},
cliConfig: &config.CliConfig{
GitSshKeyPassphrase: validPassword,
GitPrivateKeyFile: validPrivateKeyfilePath,
GitKnownHostsFile: validGitKnownHostsFilePath,
},
},
{
testName: "minimum args",
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Git: &v1alpha1.GitProviderConfig{},
},
},
cliConfig: &config.CliConfig{
GitSshKeyPassphrase: validPassword,
GitPrivateKeyFile: validPrivateKeyfilePath,
GitKnownHostsFile: validGitKnownHostsFilePath,
},
wantExecArgs: []interface{}{
"bootstrap", gitProvider, "--url", "", "--path", "", "--private-key-file", privateKeyFilePath, "--silent", "--ssh-key-algorithm", "ecdsa",
"--password", password,
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(
ctx,
envmap,
tt.wantExecArgs...,
).Return(bytes.Buffer{}, nil)
f := executables.NewFlux(executable)
if err := f.BootstrapGit(ctx, tt.cluster, tt.fluxConfig, tt.cliConfig); err != nil {
t.Errorf("flux.BootstrapGit() error = %v, want nil", err)
}
})
}
}
| 560 |
eks-anywhere | aws | Go | package executables
import (
"bufio"
"bytes"
"context"
"crypto/tls"
_ "embed"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
govcPath = "govc"
govcUsernameKey = "GOVC_USERNAME"
govcPasswordKey = "GOVC_PASSWORD"
govcURLKey = "GOVC_URL"
govcInsecure = "GOVC_INSECURE"
govcDatacenterKey = "GOVC_DATACENTER"
govcTlsHostsFile = "govc_known_hosts"
govcTlsKnownHostsKey = "GOVC_TLS_KNOWN_HOSTS"
vSphereServerKey = "VSPHERE_SERVER"
byteToGiB = 1073741824.0
DeployOptsFile = "deploy-opts.json"
disk1 = "Hard disk 1"
disk2 = "Hard disk 2"
)
var requiredEnvs = []string{govcUsernameKey, govcPasswordKey, govcURLKey, govcInsecure, govcDatacenterKey}
type networkMapping struct {
Name string `json:"Name,omitempty"`
Network string `json:"Network,omitempty"`
}
type deployOption struct {
DiskProvisioning string `json:"DiskProvisioning,omitempty"`
NetworkMapping []networkMapping `json:"NetworkMapping,omitempty"`
}
type FolderType string
const (
datastore FolderType = "datastore"
vm FolderType = "vm"
maxRetries = 5
backOffPeriod = 5 * time.Second
)
type Govc struct {
writer filewriter.FileWriter
Executable
*retrier.Retrier
requiredEnvs *syncSlice
envMap map[string]string
}
type GovcOpt func(*Govc)
func NewGovc(executable Executable, writer filewriter.FileWriter, opts ...GovcOpt) *Govc {
envVars := newSyncSlice()
envVars.append(requiredEnvs...)
g := &Govc{
writer: writer,
Executable: executable,
Retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
requiredEnvs: envVars,
}
for _, opt := range opts {
opt(g)
}
return g
}
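// WithGovcEnvMap returns a GovcOpt that sets a static environment for all govc
// invocations, bypassing the default credential lookup from the process environment.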
func WithGovcEnvMap(envMap map[string]string) GovcOpt {
return func(g *Govc) {
g.envMap = envMap
}
}
func (g *Govc) exec(ctx context.Context, args ...string) (stdout bytes.Buffer, err error) {
envMap, err := g.validateAndSetupCreds()
if err != nil {
return bytes.Buffer{}, fmt.Errorf("failed govc validations: %v", err)
}
return g.ExecuteWithEnv(ctx, envMap, args...)
}
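// Close logs out the current govc session. It is a no-op on a nil Govc.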
func (g *Govc) Close(ctx context.Context) error {
if g == nil {
return nil
}
return g.Logout(ctx)
}
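// Logout terminates the current govc session, including the separate session
// used by commands that skip certificate verification.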
func (g *Govc) Logout(ctx context.Context) error {
logger.V(3).Info("Logging out from current govc session")
if _, err := g.exec(ctx, "session.logout"); err != nil {
return fmt.Errorf("govc returned error when logging out: %v", err)
}
	// Commands that skip cert verification use a different session,
	// so we destroy that one here as well to avoid leaving it orphaned.
if _, err := g.exec(ctx, "session.logout", "-k"); err != nil {
return fmt.Errorf("govc returned error when logging out from session without cert verification: %v", err)
}
return nil
}
// SearchTemplate looks for a vm template with the same base name as the provided template path.
// If found, it returns the fully qualified path to the template.
// If multiple matching templates are found, it returns an error.
func (g *Govc) SearchTemplate(ctx context.Context, datacenter, template string) (string, error) {
params := []string{"find", "-json", "/" + datacenter, "-type", "VirtualMachine", "-name", filepath.Base(template)}
var templateResponse bytes.Buffer
var err error
err = g.Retry(func() error {
templateResponse, err = g.exec(ctx, params...)
return err
})
if err != nil {
return "", fmt.Errorf("getting template: %v", err)
}
templateJson := templateResponse.String()
templateJson = strings.TrimSuffix(templateJson, "\n")
if templateJson == "null" || templateJson == "" {
logger.V(2).Info(fmt.Sprintf("Template not found: %s", template))
return "", nil
}
templateInfo := make([]string, 0)
if err = json.Unmarshal([]byte(templateJson), &templateInfo); err != nil {
logger.V(2).Info(fmt.Sprintf("Failed unmarshalling govc response: %s, %v", templateJson, err))
return "", nil
}
bTemplateFound := false
var foundTemplate string
for _, t := range templateInfo {
if strings.HasSuffix(t, template) {
if bTemplateFound {
return "", fmt.Errorf("specified template '%s' maps to multiple paths within the datacenter '%s'", template, datacenter)
}
bTemplateFound = true
foundTemplate = t
}
}
if !bTemplateFound {
logger.V(2).Info(fmt.Sprintf("Template '%s' not found", template))
return "", nil
}
return foundTemplate, nil
}
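// LibraryElementExists checks whether the given content library exists in vCenter.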
func (g *Govc) LibraryElementExists(ctx context.Context, library string) (bool, error) {
response, err := g.exec(ctx, "library.ls", library)
if err != nil {
return false, fmt.Errorf("govc failed getting library to check if it exists: %v", err)
}
return response.Len() > 0, nil
}
type libElement struct {
ContentVersion string `json:"content_version"`
}
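// GetLibraryElementContentVersion returns the content version of the given
// library element, or "-1" if the element does not exist.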
func (g *Govc) GetLibraryElementContentVersion(ctx context.Context, element string) (string, error) {
response, err := g.exec(ctx, "library.info", "-json", element)
if err != nil {
return "", fmt.Errorf("govc failed getting library element info: %v", err)
}
elementInfoJson := response.String()
elementInfoJson = strings.TrimSuffix(elementInfoJson, "\n")
if elementInfoJson == "null" {
return "-1", nil
}
elementInfo := make([]libElement, 0)
err = yaml.Unmarshal([]byte(elementInfoJson), &elementInfo)
if err != nil {
return "", fmt.Errorf("unmarshalling library element info: %v", err)
}
if len(elementInfo) == 0 {
return "", fmt.Errorf("govc failed to return element info for library element %v", element)
}
return elementInfo[0].ContentVersion, nil
}
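// DeleteLibraryElement removes the given element from its content library.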
func (g *Govc) DeleteLibraryElement(ctx context.Context, element string) error {
_, err := g.exec(ctx, "library.rm", element)
if err != nil {
return fmt.Errorf("govc failed deleting library item: %v", err)
}
return nil
}
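// ResizeDisk resizes the named disk of the given template to diskSizeInGB gigabytes.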
func (g *Govc) ResizeDisk(ctx context.Context, datacenter, template, diskName string, diskSizeInGB int) error {
_, err := g.exec(ctx, "vm.disk.change", "-dc", datacenter, "-vm", template, "-disk.name", diskName, "-size", strconv.Itoa(diskSizeInGB)+"G")
if err != nil {
return fmt.Errorf("failed to resize disk %s: %v", diskName, err)
}
return nil
}
type deviceInfoResponse struct {
Devices []VirtualDevice
}
// VirtualDevice describes a virtual device for a VM.
type VirtualDevice struct {
Name string
DeviceInfo deviceInfo
CapacityInKB float64
}
type deviceInfo struct {
Label string
}
// DevicesInfo returns the device info for the provided virtual machine.
func (g *Govc) DevicesInfo(ctx context.Context, datacenter, template string, args ...string) ([]VirtualDevice, error) {
params := []string{"device.info", "-dc", datacenter, "-vm", template, "-json"}
params = append(params, args...)
response, err := g.exec(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting template device information: %v", err)
}
var devicesInfo deviceInfoResponse
err = yaml.Unmarshal(response.Bytes(), &devicesInfo)
if err != nil {
return nil, fmt.Errorf("unmarshalling devices info: %v", err)
}
return devicesInfo.Devices, nil
}
// GetVMDiskSizeInGB returns the size of the first disk on the VM in GB.
func (g *Govc) GetVMDiskSizeInGB(ctx context.Context, vm, datacenter string) (int, error) {
devicesInfo, err := g.DevicesInfo(ctx, datacenter, vm, "disk-*")
if err != nil {
return 0, fmt.Errorf("getting disk size for vm %s: %v", vm, err)
}
if len(devicesInfo) == 0 {
return 0, fmt.Errorf("no disks found for vm %s", vm)
}
return int(devicesInfo[0].CapacityInKB / 1024 / 1024), nil
}
// GetHardDiskSize returns the size of all the hard disks for the given VM.
func (g *Govc) GetHardDiskSize(ctx context.Context, vm, datacenter string) (map[string]float64, error) {
devicesInfo, err := g.DevicesInfo(ctx, datacenter, vm, "disk-*")
if err != nil {
return nil, fmt.Errorf("getting hard disk sizes for vm %s: %v", vm, err)
}
if len(devicesInfo) == 0 {
return nil, fmt.Errorf("no hard disks found for vm %s", vm)
}
hardDiskMap := make(map[string]float64)
for _, deviceInfo := range devicesInfo {
if strings.EqualFold(deviceInfo.DeviceInfo.Label, disk1) {
hardDiskMap[disk1] = deviceInfo.CapacityInKB
} else if strings.EqualFold(deviceInfo.DeviceInfo.Label, disk2) {
hardDiskMap[disk2] = deviceInfo.CapacityInKB
}
}
return hardDiskMap, nil
}
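// TemplateHasSnapshot reports whether the given template has at least one snapshot.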
func (g *Govc) TemplateHasSnapshot(ctx context.Context, template string) (bool, error) {
envMap, err := g.validateAndSetupCreds()
if err != nil {
return false, fmt.Errorf("failed govc validations: %v", err)
}
params := []string{"snapshot.tree", "-vm", template}
snap, err := g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
return false, fmt.Errorf("failed to get snapshot details: %v", err)
}
if snap.String() == "" {
return false, nil
}
return true, nil
}
type datastoreResponse struct {
Datastores []types.Datastores `json:"Datastores"`
}
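// GetWorkloadAvailableSpace returns the free space of the given datastore in GiB.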
func (g *Govc) GetWorkloadAvailableSpace(ctx context.Context, datastore string) (float64, error) {
envMap, err := g.validateAndSetupCreds()
if err != nil {
return 0, fmt.Errorf("failed govc validations: %v", err)
}
params := []string{"datastore.info", "-json=true", datastore}
result, err := g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
return 0, fmt.Errorf("getting datastore info: %v", err)
}
	response := &datastoreResponse{}
	if err = json.Unmarshal(result.Bytes(), response); err != nil {
		return -1, fmt.Errorf("unmarshalling datastore info response: %v", err)
	}
if len(response.Datastores) > 0 {
freeSpace := response.Datastores[0].Info.FreeSpace
spaceGiB := freeSpace / byteToGiB
return spaceGiB, nil
}
return 0, fmt.Errorf("getting datastore available space response: %v", err)
}
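// CreateLibrary creates a content library backed by the given datastore.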
func (g *Govc) CreateLibrary(ctx context.Context, datastore, library string) error {
if _, err := g.exec(ctx, "library.create", "-ds", datastore, library); err != nil {
return fmt.Errorf("creating library %s: %v", library, err)
}
return nil
}
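// DeployTemplateFromLibrary deploys a template from the content library into
// templateDir, optionally resizing the Bottlerocket data disk, and then snapshots
// the resulting VM and marks it as a template.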
func (g *Govc) DeployTemplateFromLibrary(ctx context.Context, templateDir, templateName, library, datacenter, datastore, network, resourcePool string, resizeBRDisk bool) error {
logger.V(4).Info("Deploying template", "dir", templateDir, "templateName", templateName)
if err := g.DeployTemplate(ctx, library, templateName, templateName, templateDir, datacenter, datastore, network, resourcePool, nil); err != nil {
return err
}
if resizeBRDisk {
		// Get the template's device information to properly identify the second disk
logger.V(4).Info("Getting devices info for template")
devicesInfo, err := g.DevicesInfo(ctx, datacenter, templateName)
if err != nil {
return err
}
		// For 1.22 we switched to using a single disk for Bottlerocket, so for now, as long as the boolean
		// is set and we only see one disk, we can assume this is a 1.22 template. This loop would need to
		// change if that assumption changes in the future, but 1.20 and 1.21 still use dual disks, which is
		// why we need to check for the second disk first. Since this loop returns all kinds of devices and
		// not just hard disks, we need to do these checks based on the label.
disk1 := ""
disk2 := ""
for _, device := range devicesInfo {
deviceLabel := device.DeviceInfo.Label
			// Record the device names of the hard disks; the resize target and size are chosen below.
if strings.EqualFold(deviceLabel, "Hard disk 1") {
disk1 = device.Name
} else if strings.EqualFold(deviceLabel, "Hard disk 2") {
disk2 = device.Name
break
}
}
diskName := ""
var diskSizeInGB int
if disk2 != "" {
logger.V(4).Info("Resizing disk 2 of template to 20G")
diskName = disk2
diskSizeInGB = 20
} else if disk1 != "" {
logger.V(4).Info("Resizing disk 1 of template to 22G")
diskName = disk1
diskSizeInGB = 22
} else {
return fmt.Errorf("template %v is not valid as there are no associated disks", templateName)
}
err = g.ResizeDisk(ctx, datacenter, templateName, diskName, diskSizeInGB)
if err != nil {
return fmt.Errorf("resizing disk %v to %dG: %v", diskName, diskSizeInGB, err)
}
}
templateFullPath := filepath.Join(templateDir, templateName)
logger.V(4).Info("Taking template snapshot", "templateName", templateFullPath)
if err := g.createVMSnapshot(ctx, datacenter, templateFullPath); err != nil {
return err
}
logger.V(4).Info("Marking vm as template", "templateName", templateFullPath)
if err := g.markVMAsTemplate(ctx, datacenter, templateFullPath); err != nil {
return err
}
return nil
}
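// ImportTemplate imports the OVA at ovaURL into the given content library under the provided name.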
func (g *Govc) ImportTemplate(ctx context.Context, library, ovaURL, name string) error {
logger.V(4).Info("Importing template", "ova", ovaURL, "templateName", name)
if _, err := g.exec(ctx, "library.import", "-k", "-pull", "-n", name, library, ovaURL); err != nil {
return fmt.Errorf("importing template: %v", err)
}
return nil
}
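// DeployTemplate deploys a VM named vmName from a content library template into
// deployFolder, creating the folder if it does not exist. deployOptionsOverride,
// when non-empty, replaces the default deploy options.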
func (g *Govc) DeployTemplate(ctx context.Context, library, templateName, vmName, deployFolder, datacenter, datastore, network, resourcePool string, deployOptionsOverride []byte) error {
envMap, err := g.validateAndSetupCreds()
if err != nil {
return fmt.Errorf("failed govc validations: %v", err)
}
templateInLibraryPath := filepath.Join(library, templateName)
if !filepath.IsAbs(templateInLibraryPath) {
templateInLibraryPath = fmt.Sprintf("/%s", templateInLibraryPath)
}
deployOpts, err := getDeployOptions(network)
if err != nil {
return err
}
if len(deployOptionsOverride) > 0 {
deployOpts = deployOptionsOverride
}
deployOptsPath, err := g.writer.Write(DeployOptsFile, deployOpts, filewriter.PersistentFile)
if err != nil {
return fmt.Errorf("failed writing deploy options file to disk: %v", err)
}
bFolderNotFound := false
params := []string{"folder.info", deployFolder}
err = g.Retry(func() error {
errBuffer, err := g.ExecuteWithEnv(ctx, envMap, params...)
errString := strings.ToLower(errBuffer.String())
if err != nil {
if !strings.Contains(errString, "not found") {
return fmt.Errorf("obtaining folder information: %v", err)
} else {
bFolderNotFound = true
}
}
return nil
})
if err != nil || bFolderNotFound {
params = []string{"folder.create", deployFolder}
err = g.Retry(func() error {
errBuffer, err := g.ExecuteWithEnv(ctx, envMap, params...)
errString := strings.ToLower(errBuffer.String())
if err != nil && !strings.Contains(errString, "already exists") {
return fmt.Errorf("creating folder: %v", err)
}
return nil
})
if err != nil {
return fmt.Errorf("creating folder: %v", err)
}
}
params = []string{
"library.deploy",
"-dc", datacenter,
"-ds", datastore,
"-pool", resourcePool,
"-folder", deployFolder,
"-options", deployOptsPath,
templateInLibraryPath, vmName,
}
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("deploying template: %v", err)
}
return nil
}
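// DeleteTemplate converts the template back into a VM, removes all of its snapshots and destroys it.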
func (g *Govc) DeleteTemplate(ctx context.Context, resourcePool, templatePath string) error {
if err := g.markAsVM(ctx, resourcePool, templatePath); err != nil {
return err
}
if err := g.removeSnapshotsFromVM(ctx, templatePath); err != nil {
return err
}
if err := g.deleteVM(ctx, templatePath); err != nil {
return err
}
return nil
}
func (g *Govc) markAsVM(ctx context.Context, resourcePool, path string) error {
if _, err := g.exec(ctx, "vm.markasvm", "-pool", resourcePool, path); err != nil {
return fmt.Errorf("failed marking as vm: %v", err)
}
return nil
}
func (g *Govc) removeSnapshotsFromVM(ctx context.Context, path string) error {
if _, err := g.exec(ctx, "snapshot.remove", "-vm", path, "*"); err != nil {
return fmt.Errorf("removing snapshots from vm: %v", err)
}
return nil
}
func (g *Govc) deleteVM(ctx context.Context, path string) error {
if _, err := g.exec(ctx, "vm.destroy", path); err != nil {
return fmt.Errorf("deleting vm: %v", err)
}
return nil
}
func (g *Govc) createVMSnapshot(ctx context.Context, datacenter, name string) error {
if _, err := g.exec(ctx, "snapshot.create", "-dc", datacenter, "-m=false", "-vm", name, "root"); err != nil {
return fmt.Errorf("govc failed taking vm snapshot: %v", err)
}
return nil
}
func (g *Govc) markVMAsTemplate(ctx context.Context, datacenter, vmName string) error {
if _, err := g.exec(ctx, "vm.markastemplate", "-dc", datacenter, vmName); err != nil {
return fmt.Errorf("marking VM as template: %v", err)
}
return nil
}
func (g *Govc) getEnvMap() (map[string]string, error) {
if g.envMap != nil {
return g.envMap, nil
}
envMap := make(map[string]string)
for key := range g.requiredEnvs.iterate() {
if env, ok := os.LookupEnv(key); ok && len(env) > 0 {
envMap[key] = env
		} else {
			if key != govcInsecure {
				return nil, fmt.Errorf("required env not set %s", key)
			}
			if err := os.Setenv(govcInsecure, "false"); err != nil {
				logger.Info(fmt.Sprintf("Warning: unable to set %s", govcInsecure))
			}
		}
}
return envMap, nil
}
func (g *Govc) validateAndSetupCreds() (map[string]string, error) {
if g.envMap != nil {
return g.envMap, nil
}
var vSphereUsername, vSpherePassword, vSphereURL string
var ok bool
var envMap map[string]string
if vSphereUsername, ok = os.LookupEnv(config.EksavSphereUsernameKey); ok && len(vSphereUsername) > 0 {
if err := os.Setenv(govcUsernameKey, vSphereUsername); err != nil {
return nil, fmt.Errorf("unable to set %s: %v", govcUsernameKey, err)
}
} else if govcUsername, ok := os.LookupEnv(govcUsernameKey); !ok || len(govcUsername) <= 0 {
return nil, fmt.Errorf("%s is not set or is empty: %t", govcUsernameKey, ok)
}
if vSpherePassword, ok = os.LookupEnv(config.EksavSpherePasswordKey); ok && len(vSpherePassword) > 0 {
if err := os.Setenv(govcPasswordKey, vSpherePassword); err != nil {
return nil, fmt.Errorf("unable to set %s: %v", govcPasswordKey, err)
}
} else if govcPassword, ok := os.LookupEnv(govcPasswordKey); !ok || len(govcPassword) <= 0 {
return nil, fmt.Errorf("%s is not set or is empty: %t", govcPasswordKey, ok)
}
if vSphereURL, ok = os.LookupEnv(vSphereServerKey); ok && len(vSphereURL) > 0 {
if err := os.Setenv(govcURLKey, vSphereURL); err != nil {
return nil, fmt.Errorf("unable to set %s: %v", govcURLKey, err)
}
} else if govcURL, ok := os.LookupEnv(govcURLKey); !ok || len(govcURL) <= 0 {
return nil, fmt.Errorf("%s is not set or is empty: %t", govcURLKey, ok)
}
if govcDatacenter, ok := os.LookupEnv(govcDatacenterKey); !ok || len(govcDatacenter) <= 0 {
return nil, fmt.Errorf("%s is not set or is empty: %t", govcDatacenterKey, ok)
}
envMap, err := g.getEnvMap()
if err != nil {
		return nil, err
}
return envMap, nil
}
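// CleanupVms powers off and destroys every VM in the datacenter whose name starts
// with clusterName. When dryRun is true it only logs the VMs it finds.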
func (g *Govc) CleanupVms(ctx context.Context, clusterName string, dryRun bool) error {
envMap, err := g.validateAndSetupCreds()
if err != nil {
return fmt.Errorf("failed govc validations: %v", err)
}
var params []string
var result bytes.Buffer
params = strings.Fields("find /" + envMap[govcDatacenterKey] + " -type VirtualMachine -name " + clusterName + "*")
result, err = g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
return fmt.Errorf("getting vm list: %v", err)
}
scanner := bufio.NewScanner(strings.NewReader(result.String()))
for scanner.Scan() {
vmName := scanner.Text()
if dryRun {
logger.Info("Found ", "vm_name", vmName)
continue
}
params = strings.Fields("vm.power -off -force " + vmName)
result, err = g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
logger.Info("WARN: Failed to power off vm ", "vm_name", vmName, "error", err)
}
params = strings.Fields("object.destroy " + vmName)
result, err = g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
logger.Info("WARN: Failed to delete vm ", "vm_name", vmName, "error", err)
} else {
logger.Info("Deleted ", "vm_name", vmName)
}
}
if err := scanner.Err(); err != nil {
return fmt.Errorf("failure reading output of vm list")
}
return nil
}
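// ValidateVCenterConnection checks that the vCenter server is reachable over HTTPS,
// skipping TLS certificate verification.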
func (g *Govc) ValidateVCenterConnection(ctx context.Context, server string) error {
skipVerifyTransport := http.DefaultTransport.(*http.Transport).Clone()
skipVerifyTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
client := &http.Client{Transport: skipVerifyTransport}
if _, err := client.Get("https://" + server); err != nil {
return fmt.Errorf("failed to reach server %s: %v", server, err)
}
return nil
}
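// ValidateVCenterAuthentication verifies the configured credentials by running
// "govc about" against the server, skipping certificate verification.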
func (g *Govc) ValidateVCenterAuthentication(ctx context.Context) error {
err := g.Retry(func() error {
_, err := g.exec(ctx, "about", "-k")
return err
})
if err != nil {
return fmt.Errorf("vSphere authentication failed: %v", err)
}
return nil
}
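// IsCertSelfSigned reports whether connecting with certificate verification enabled
// fails, which suggests a self-signed or otherwise untrusted certificate.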
func (g *Govc) IsCertSelfSigned(ctx context.Context) bool {
_, err := g.exec(ctx, "about")
return err != nil
}
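// GetCertThumbprint returns the thumbprint of the vCenter server certificate,
// skipping verification in order to retrieve it.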
func (g *Govc) GetCertThumbprint(ctx context.Context) (string, error) {
bufferResponse, err := g.exec(ctx, "about.cert", "-thumbprint", "-k")
if err != nil {
return "", fmt.Errorf("unable to retrieve thumbprint: %v", err)
}
data := strings.Split(strings.Trim(bufferResponse.String(), "\n"), " ")
if len(data) != 2 {
return "", fmt.Errorf("invalid thumbprint format")
}
return data[1], nil
}
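// ConfigureCertThumbprint writes a known-hosts file with the server thumbprint and
// points GOVC_TLS_KNOWN_HOSTS at it so subsequent govc commands trust the certificate.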
func (g *Govc) ConfigureCertThumbprint(ctx context.Context, server, thumbprint string) error {
path, err := g.writer.Write(filepath.Base(govcTlsHostsFile), []byte(fmt.Sprintf("%s %s", server, thumbprint)))
if err != nil {
return fmt.Errorf("writing to file %s: %v", govcTlsHostsFile, err)
}
if err = os.Setenv(govcTlsKnownHostsKey, path); err != nil {
return fmt.Errorf("unable to set %s: %v", govcTlsKnownHostsKey, err)
}
g.requiredEnvs.append(govcTlsKnownHostsKey)
return nil
}
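// DatacenterExists checks whether the given datacenter exists in vCenter.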
func (g *Govc) DatacenterExists(ctx context.Context, datacenter string) (bool, error) {
exists := false
err := g.Retry(func() error {
result, err := g.exec(ctx, "datacenter.info", datacenter)
if err == nil {
exists = true
return nil
}
if strings.HasSuffix(result.String(), "not found") {
exists = false
return nil
}
return err
})
if err != nil {
return false, fmt.Errorf("failed to get datacenter: %v", err)
}
return exists, nil
}
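// NetworkExists checks whether the given network path exists in vCenter.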
func (g *Govc) NetworkExists(ctx context.Context, network string) (bool, error) {
exists := false
err := g.Retry(func() error {
networkResponse, err := g.exec(ctx, "find", "-maxdepth=1", filepath.Dir(network), "-type", "n", "-name", filepath.Base(network))
if err != nil {
return err
}
if networkResponse.String() == "" {
exists = false
return nil
}
exists = true
return nil
})
if err != nil {
return false, fmt.Errorf("failed checking if network '%s' exists: %v", network, err)
}
return exists, nil
}
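// ValidateVCenterSetupMachineConfig validates the datastore, folder and resource pool
// referenced by the machine config, normalizing them to fully qualified paths within
// the datacenter.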
func (g *Govc) ValidateVCenterSetupMachineConfig(ctx context.Context, datacenterConfig *v1alpha1.VSphereDatacenterConfig, machineConfig *v1alpha1.VSphereMachineConfig, _ *bool) error {
envMap, err := g.validateAndSetupCreds()
if err != nil {
return fmt.Errorf("failed govc validations: %v", err)
}
machineConfig.Spec.Datastore, err = prependPath(datastore, machineConfig.Spec.Datastore, datacenterConfig.Spec.Datacenter)
if err != nil {
return err
}
params := []string{"datastore.info", machineConfig.Spec.Datastore}
err = g.Retry(func() error {
_, err = g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
datastorePath := filepath.Dir(machineConfig.Spec.Datastore)
isValidDatastorePath := g.isValidPath(ctx, envMap, datastorePath)
if isValidDatastorePath {
leafDir := filepath.Base(machineConfig.Spec.Datastore)
return fmt.Errorf("valid path, but '%s' is not a datastore", leafDir)
} else {
return fmt.Errorf("failed to get datastore: %v", err)
}
}
return nil
})
if err != nil {
return fmt.Errorf("failed to get datastore: %v", err)
}
logger.MarkPass("Datastore validated")
if len(machineConfig.Spec.Folder) > 0 {
machineConfig.Spec.Folder, err = prependPath(vm, machineConfig.Spec.Folder, datacenterConfig.Spec.Datacenter)
if err != nil {
return err
}
params = []string{"folder.info", machineConfig.Spec.Folder}
err = g.Retry(func() error {
_, err := g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
err = g.createFolder(ctx, envMap, machineConfig)
if err != nil {
currPath := "/" + datacenterConfig.Spec.Datacenter + "/"
dirs := strings.Split(machineConfig.Spec.Folder, "/")
for _, dir := range dirs[2:] {
currPath += dir + "/"
if !g.isValidPath(ctx, envMap, currPath) {
return fmt.Errorf("%s is an invalid intermediate directory", currPath)
}
}
return err
}
}
return nil
})
if err != nil {
return fmt.Errorf("failed to get folder: %v", err)
}
logger.MarkPass("Folder validated")
}
var poolInfoResponse bytes.Buffer
params = []string{"find", "-json", "/" + datacenterConfig.Spec.Datacenter, "-type", "p", "-name", filepath.Base(machineConfig.Spec.ResourcePool)}
err = g.Retry(func() error {
poolInfoResponse, err = g.ExecuteWithEnv(ctx, envMap, params...)
return err
})
if err != nil {
return fmt.Errorf("getting resource pool: %v", err)
}
poolInfoJson := poolInfoResponse.String()
poolInfoJson = strings.TrimSuffix(poolInfoJson, "\n")
if poolInfoJson == "null" || poolInfoJson == "" {
return fmt.Errorf("resource pool '%s' not found", machineConfig.Spec.ResourcePool)
}
poolInfo := make([]string, 0)
if err = json.Unmarshal([]byte(poolInfoJson), &poolInfo); err != nil {
return fmt.Errorf("failed unmarshalling govc response: %v", err)
}
machineConfig.Spec.ResourcePool = strings.TrimPrefix(machineConfig.Spec.ResourcePool, "*/")
bPoolFound := false
var foundPool string
for _, p := range poolInfo {
if strings.HasSuffix(p, machineConfig.Spec.ResourcePool) {
if bPoolFound {
return fmt.Errorf("specified resource pool '%s' maps to multiple paths within the datacenter '%s'", machineConfig.Spec.ResourcePool, datacenterConfig.Spec.Datacenter)
}
bPoolFound = true
foundPool = p
}
}
if !bPoolFound {
return fmt.Errorf("resource pool '%s' not found", machineConfig.Spec.ResourcePool)
}
machineConfig.Spec.ResourcePool = foundPool
logger.MarkPass("Resource pool validated")
return nil
}
func prependPath(folderType FolderType, folderPath string, datacenter string) (string, error) {
prefix := fmt.Sprintf("/%s", datacenter)
modPath := folderPath
if !strings.HasPrefix(folderPath, prefix) {
modPath = fmt.Sprintf("%s/%s/%s", prefix, folderType, folderPath)
logger.V(4).Info(fmt.Sprintf("Relative %s path specified, using path %s", folderType, modPath))
return modPath, nil
}
prefix += fmt.Sprintf("/%s", folderType)
if !strings.HasPrefix(folderPath, prefix) {
return folderPath, fmt.Errorf("invalid folder type, expected path under %s", prefix)
}
return modPath, nil
}
func (g *Govc) createFolder(ctx context.Context, envMap map[string]string, machineConfig *v1alpha1.VSphereMachineConfig) error {
params := []string{"folder.create", machineConfig.Spec.Folder}
err := g.Retry(func() error {
_, err := g.ExecuteWithEnv(ctx, envMap, params...)
if err != nil {
return fmt.Errorf("creating folder: %v", err)
}
return nil
})
return err
}
func (g *Govc) isValidPath(ctx context.Context, envMap map[string]string, path string) bool {
params := []string{"folder.info", path}
_, err := g.ExecuteWithEnv(ctx, envMap, params...)
return err == nil
}
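// GetTags returns the names of all tags attached to the object at the given path.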
func (g *Govc) GetTags(ctx context.Context, path string) ([]string, error) {
var tagsResponse bytes.Buffer
var err error
err = g.Retry(func() error {
tagsResponse, err = g.exec(ctx, "tags.attached.ls", "-json", "-r", path)
return err
})
if err != nil {
return nil, fmt.Errorf("govc returned error when listing tags for %s: %v", path, err)
}
tagsJson := tagsResponse.String()
if tagsJson == "null" {
return nil, nil
}
tags := make([]string, 0)
if err = json.Unmarshal([]byte(tagsJson), &tags); err != nil {
return nil, fmt.Errorf("failed unmarshalling govc response to get tags for %s: %v", path, err)
}
return tags, nil
}
// Tag struct to represent a vSphere Tag.
type Tag struct {
Id string
Name string
CategoryId string `json:"category_id,omitempty"`
}
// ListTags list all vSphere tags in vCenter.
func (g *Govc) ListTags(ctx context.Context) ([]Tag, error) {
tagsResponse, err := g.exec(ctx, "tags.ls", "-json")
if err != nil {
return nil, fmt.Errorf("govc returned error when listing tags: %v", err)
}
tagsJson := tagsResponse.String()
if tagsJson == "null" {
return nil, nil
}
tags := make([]Tag, 0)
if err = json.Unmarshal([]byte(tagsJson), &tags); err != nil {
return nil, fmt.Errorf("failed unmarshalling govc response from list tags: %v", err)
}
return tags, nil
}
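// AddTag attaches an existing tag to the object at the given path.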
func (g *Govc) AddTag(ctx context.Context, path, tag string) error {
if _, err := g.exec(ctx, "tags.attach", tag, path); err != nil {
return fmt.Errorf("govc returned error when attaching tag to %s: %v", path, err)
}
return nil
}
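// CreateTag creates a tag under the given category.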
func (g *Govc) CreateTag(ctx context.Context, tag, category string) error {
if _, err := g.exec(ctx, "tags.create", "-c", category, tag); err != nil {
return fmt.Errorf("govc returned error when creating tag %s: %v", tag, err)
}
return nil
}
type category struct {
Id string
Name string
Cardinality string
AssociableTypes []string `json:"associable_types,omitempty"`
}
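// ListCategories returns the names of all tag categories in vCenter.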
func (g *Govc) ListCategories(ctx context.Context) ([]string, error) {
categoriesResponse, err := g.exec(ctx, "tags.category.ls", "-json")
if err != nil {
return nil, fmt.Errorf("govc returned error when listing categories: %v", err)
}
categoriesJson := categoriesResponse.String()
if categoriesJson == "null" {
return nil, nil
}
categories := make([]category, 0)
if err = json.Unmarshal([]byte(categoriesJson), &categories); err != nil {
return nil, fmt.Errorf("failed unmarshalling govc response from list categories: %v", err)
}
categoryNames := make([]string, 0, len(categories))
for _, c := range categories {
categoryNames = append(categoryNames, c.Name)
}
return categoryNames, nil
}
type objectType string
const virtualMachine objectType = "VirtualMachine"
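// CreateCategoryForVM creates a tag category associable with VirtualMachine objects.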
func (g *Govc) CreateCategoryForVM(ctx context.Context, name string) error {
return g.createCategory(ctx, name, []objectType{virtualMachine})
}
func (g *Govc) createCategory(ctx context.Context, name string, objectTypes []objectType) error {
params := []string{"tags.category.create"}
for _, t := range objectTypes {
params = append(params, "-t", string(t))
}
params = append(params, name)
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("govc returned error when creating category %s: %v", name, err)
}
return nil
}
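// getDeployOptions builds the default library.deploy options: thin disk provisioning
// and NIC mappings for both Ubuntu and Bottlerocket templates pointed at the given network.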
func getDeployOptions(network string) ([]byte, error) {
deployOptsStruct := deployOption{
DiskProvisioning: "thin",
NetworkMapping: []networkMapping{
{
Name: "nic0", // needed for Ubuntu
Network: network,
},
{
Name: "VM Network", // needed for Bottlerocket
Network: network,
},
},
}
deployOpts, err := json.Marshal(deployOptsStruct)
if err != nil {
return nil, fmt.Errorf("marshalling template deployment options: %v", err)
}
return deployOpts, err
}
// CreateUser creates a user.
func (g *Govc) CreateUser(ctx context.Context, username string, password string) error {
params := []string{
"sso.user.create", "-p", password, username,
}
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("govc returned error %v", err)
}
return nil
}
// UserExists checks if a user exists.
func (g *Govc) UserExists(ctx context.Context, username string) (bool, error) {
params := []string{
"sso.user.ls",
username,
}
response, err := g.exec(ctx, params...)
if err != nil {
return false, err
}
return response.Len() > 0, nil
}
// CreateGroup creates a group.
func (g *Govc) CreateGroup(ctx context.Context, name string) error {
params := []string{
"sso.group.create", name,
}
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("govc returned error %v", err)
}
return nil
}
// GroupExists checks if a group exists.
func (g *Govc) GroupExists(ctx context.Context, name string) (bool, error) {
params := []string{
"sso.group.ls",
name,
}
response, err := g.exec(ctx, params...)
if err != nil {
return false, err
}
return response.Len() > 0, nil
}
// AddUserToGroup adds a user to a group.
func (g *Govc) AddUserToGroup(ctx context.Context, name string, username string) error {
params := []string{
"sso.group.update",
"-a", username,
name,
}
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("govc returned error %v", err)
}
return nil
}
// RoleExists checks if a role exists.
func (g *Govc) RoleExists(ctx context.Context, name string) (bool, error) {
params := []string{
"role.ls",
name,
}
_, err := g.exec(ctx, params...)
if err != nil && strings.Contains(err.Error(), fmt.Sprintf("role \"%s\" not found", name)) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// CreateRole creates a role with specified privileges.
func (g *Govc) CreateRole(ctx context.Context, name string, privileges []string) error {
params := append([]string{"role.create", name}, privileges...)
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("govc returned error %v", err)
}
return nil
}
// SetGroupRoleOnObject sets a role for a given group on target object.
func (g *Govc) SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error {
principal = principal + "@" + domain
params := []string{
"permissions.set",
"-group=true",
"-principal", principal,
"-role", role,
object,
}
if _, err := g.exec(ctx, params...); err != nil {
return fmt.Errorf("govc returned error %v", err)
}
return nil
}
| 1,146 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
)
const (
govcUsername = "GOVC_USERNAME"
govcPassword = "GOVC_PASSWORD"
govcURL = "GOVC_URL"
govcDatacenter = "GOVC_DATACENTER"
govcInsecure = "GOVC_INSECURE"
vSphereUsername = "EKSA_VSPHERE_USERNAME"
vSpherePassword = "EKSA_VSPHERE_PASSWORD"
vSphereServer = "VSPHERE_SERVER"
templateLibrary = "eks-a-templates"
expectedDeployOpts = `{"DiskProvisioning":"thin","NetworkMapping":[{"Name":"nic0","Network":"/SDDC-Datacenter/network/sddc-cgw-network-1"},{"Name":"VM Network","Network":"/SDDC-Datacenter/network/sddc-cgw-network-1"}]}`
)
var govcEnvironment = map[string]string{
govcUsername: "vsphere_username",
govcPassword: "vsphere_password",
govcURL: "vsphere_server",
govcDatacenter: "vsphere_datacenter",
govcInsecure: "false",
}
func setupContext(t *testing.T) {
t.Setenv(vSphereUsername, "vsphere_username")
t.Setenv(vSpherePassword, "vsphere_password")
t.Setenv(vSphereServer, "vsphere_server")
t.Setenv(govcUsername, os.Getenv(vSphereUsername))
t.Setenv(govcPassword, os.Getenv(vSpherePassword))
t.Setenv(govcURL, os.Getenv(vSphereServer))
t.Setenv(govcInsecure, "false")
t.Setenv(govcDatacenter, "vsphere_datacenter")
}
func setup(t *testing.T, opts ...executables.GovcOpt) (dir string, govc *executables.Govc, mockExecutable *mockexecutables.MockExecutable, env map[string]string) {
setupContext(t)
dir, writer := test.NewWriter(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
g := executables.NewGovc(executable, writer, opts...)
return dir, g, executable, govcEnvironment
}
type deployTemplateTest struct {
govc *executables.Govc
mockExecutable *mockexecutables.MockExecutable
env map[string]string
datacenter string
datastore string
dir string
network string
resourcePool string
templatePath string
ovaURL string
deployFolder string
templateInLibraryPathAbs string
templateName string
diskName string
diskSize int
resizeDisk2 bool
ctx context.Context
fakeExecResponse *bytes.Buffer
expectations []*gomock.Call
}
func newDeployTemplateTest(t *testing.T) *deployTemplateTest {
dir, g, exec, env := setup(t)
return &deployTemplateTest{
govc: g,
mockExecutable: exec,
env: env,
datacenter: "SDDC-Datacenter",
datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore",
dir: dir,
network: "/SDDC-Datacenter/network/sddc-cgw-network-1",
resourcePool: "*/Resources/Compute-ResourcePool",
templatePath: "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6",
ovaURL: "https://aws.com/ova",
deployFolder: "/SDDC-Datacenter/vm/Templates",
templateInLibraryPathAbs: "/eks-a-templates/ubuntu-2004-kube-v1.19.6",
templateName: "ubuntu-2004-kube-v1.19.6",
resizeDisk2: false,
ctx: context.Background(),
fakeExecResponse: bytes.NewBufferString("dummy"),
expectations: make([]*gomock.Call, 0),
diskName: "disk-31000-1",
diskSize: 20,
}
}
func (dt *deployTemplateTest) expectFolderInfoToReturn(err error) {
dt.expectations = append(
dt.expectations,
dt.mockExecutable.EXPECT().ExecuteWithEnv(dt.ctx, dt.env, "folder.info", dt.deployFolder).Return(*dt.fakeExecResponse, err),
)
}
func (dt *deployTemplateTest) expectDeployToReturn(err error) {
dt.expectations = append(
dt.expectations,
dt.mockExecutable.EXPECT().ExecuteWithEnv(dt.ctx, dt.env, "library.deploy", "-dc", dt.datacenter, "-ds", dt.datastore, "-pool", dt.resourcePool, "-folder", dt.deployFolder, "-options", test.OfType("string"), dt.templateInLibraryPathAbs, dt.templateName).Return(*dt.fakeExecResponse, err),
)
}
func (dt *deployTemplateTest) expectDevicesInfoToReturn(err error) {
dt.expectations = append(
dt.expectations,
dt.mockExecutable.EXPECT().ExecuteWithEnv(dt.ctx, dt.env, "device.info", "-dc", dt.datacenter, "-vm", dt.templateName, "-json").Return(*dt.fakeExecResponse, err),
)
}
func (dt *deployTemplateTest) expectResizeDiskToReturn(err error) {
dt.expectations = append(
dt.expectations,
dt.mockExecutable.EXPECT().ExecuteWithEnv(dt.ctx, dt.env, "vm.disk.change", "-dc", dt.datacenter, "-vm", dt.templateName, "-disk.name", dt.diskName, "-size", strconv.Itoa(dt.diskSize)+"G").Return(*dt.fakeExecResponse, err),
)
}
func (dt *deployTemplateTest) expectCreateSnapshotToReturn(err error) {
dt.expectations = append(
dt.expectations,
dt.mockExecutable.EXPECT().ExecuteWithEnv(dt.ctx, dt.env, "snapshot.create", "-dc", dt.datacenter, "-m=false", "-vm", dt.templatePath, "root").Return(*dt.fakeExecResponse, err),
)
}
func (dt *deployTemplateTest) expectMarkAsTemplateToReturn(err error) {
dt.expectations = append(
dt.expectations,
dt.mockExecutable.EXPECT().ExecuteWithEnv(dt.ctx, dt.env, "vm.markastemplate", "-dc", dt.datacenter, dt.templatePath).Return(*dt.fakeExecResponse, err),
)
}
func (dt *deployTemplateTest) DeployTemplateFromLibrary() error {
gomock.InOrder(dt.expectations...)
return dt.govc.DeployTemplateFromLibrary(dt.ctx, dt.deployFolder, dt.templateName, templateLibrary, dt.datacenter, dt.datastore, dt.network, dt.resourcePool, dt.resizeDisk2)
}
func (dt *deployTemplateTest) assertDeployTemplateSuccess(t *testing.T) {
if err := dt.DeployTemplateFromLibrary(); err != nil {
t.Fatalf("govc.DeployTemplateFromLibrary() err = %v, want err = nil", err)
}
}
func (dt *deployTemplateTest) assertDeployTemplateError(t *testing.T) {
if err := dt.DeployTemplateFromLibrary(); err == nil {
t.Fatal("govc.DeployTemplateFromLibrary() err = nil, want err not nil")
}
}
func (dt *deployTemplateTest) assertDeployOptsMatches(t *testing.T) {
g := NewWithT(t)
actual, err := os.ReadFile(filepath.Join(dt.dir, executables.DeployOptsFile))
if err != nil {
t.Fatalf("failed to read deploy options file: %v", err)
}
g.Expect(string(actual)).To(Equal(expectedDeployOpts))
}
func TestSearchTemplateItExists(t *testing.T) {
ctx := context.Background()
template := "my-template"
datacenter := "SDDC-Datacenter"
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "find", "-json", "/"+datacenter, "-type", "VirtualMachine", "-name", filepath.Base(template)).Return(*bytes.NewBufferString("[\"/SDDC Datacenter/vm/Templates/ubuntu 2004-kube-v1.19.6\"]"), nil)
_, err := g.SearchTemplate(ctx, datacenter, template)
if err != nil {
t.Fatalf("Govc.SearchTemplate() exists = false, want true %v", err)
}
}
func TestSearchTemplateItDoesNotExist(t *testing.T) {
template := "my-template"
ctx := context.Background()
datacenter := "SDDC-Datacenter"
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "find", "-json", "/"+datacenter, "-type", "VirtualMachine", "-name", filepath.Base(template)).Return(*bytes.NewBufferString(""), nil)
templateFullPath, err := g.SearchTemplate(ctx, datacenter, template)
if err == nil && len(templateFullPath) > 0 {
t.Fatalf("Govc.SearchTemplate() exists = true, want false %v", err)
}
}
func TestSearchTemplateError(t *testing.T) {
template := "my-template"
ctx := context.Background()
datacenter := "SDDC-Datacenter"
_, g, executable, env := setup(t)
g.Retrier = retrier.NewWithMaxRetries(5, 0)
executable.EXPECT().ExecuteWithEnv(ctx, env, gomock.Any()).Return(bytes.Buffer{}, errors.New("error from execute with env")).Times(5)
_, err := g.SearchTemplate(ctx, datacenter, template)
if err == nil {
t.Fatal("Govc.SearchTemplate() err = nil, want err not nil")
}
}
func TestLibraryElementExistsItExists(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.ls", templateLibrary).Return(*bytes.NewBufferString("testing"), nil)
exists, err := g.LibraryElementExists(ctx, templateLibrary)
if err != nil {
t.Fatalf("Govc.LibraryElementExists() err = %v, want err nil", err)
}
if !exists {
t.Fatalf("Govc.LibraryElementExists() exists = false, want true")
}
}
func TestLibraryElementExistsItDoesNotExist(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.ls", templateLibrary).Return(*bytes.NewBufferString(""), nil)
exists, err := g.LibraryElementExists(ctx, templateLibrary)
if err != nil {
t.Fatalf("Govc.LibraryElementExists() err = %v, want err nil", err)
}
if exists {
t.Fatalf("Govc.LibraryElementExists() exists = true, want false")
}
}
func TestLibraryElementExistsError(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.ls", templateLibrary).Return(bytes.Buffer{}, errors.New("error from execute with env"))
_, err := g.LibraryElementExists(ctx, templateLibrary)
if err == nil {
t.Fatal("Govc.LibraryElementExists() err = nil, want err not nil")
}
}
func TestGetLibraryElementContentVersionSuccess(t *testing.T) {
ctx := context.Background()
	response := `[
	{
		"content_version": "1"
	}
]`
libraryElement := "/eks-a-templates/ubuntu-2004-kube-v1.19.6"
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.info", "-json", libraryElement).Return(*bytes.NewBufferString(response), nil)
_, err := g.GetLibraryElementContentVersion(ctx, libraryElement)
if err != nil {
t.Fatalf("Govc.GetLibraryElementContentVersion() err = %v, want err nil", err)
}
}
func TestGetLibraryElementContentVersionError(t *testing.T) {
ctx := context.Background()
libraryElement := "/eks-a-templates/ubuntu-2004-kube-v1.19.6"
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.info", "-json", libraryElement).Return(bytes.Buffer{}, errors.New("error from execute with env"))
_, err := g.GetLibraryElementContentVersion(ctx, libraryElement)
if err == nil {
t.Fatal("Govc.GetLibraryElementContentVersion() err = nil, want err not nil")
}
}
func TestDeleteLibraryElementSuccess(t *testing.T) {
ctx := context.Background()
libraryElement := "/eks-a-templates/ubuntu-2004-kube-v1.19.6"
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.rm", libraryElement).Return(*bytes.NewBufferString(""), nil)
err := g.DeleteLibraryElement(ctx, libraryElement)
if err != nil {
t.Fatalf("Govc.DeleteLibraryElement() err = %v, want err nil", err)
}
}
func TestDeleteLibraryElementError(t *testing.T) {
ctx := context.Background()
libraryElement := "/eks-a-templates/ubuntu-2004-kube-v1.19.6"
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.rm", libraryElement).Return(bytes.Buffer{}, errors.New("error from execute with env"))
err := g.DeleteLibraryElement(ctx, libraryElement)
if err == nil {
t.Fatal("Govc.DeleteLibraryElement() err = nil, want err not nil")
}
}
func TestGovcTemplateHasSnapshot(t *testing.T) {
_, writer := test.NewWriter(t)
template := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
env := govcEnvironment
ctx := context.Background()
mockCtrl := gomock.NewController(t)
setupContext(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
params := []string{"snapshot.tree", "-vm", template}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(*bytes.NewBufferString("testing"), nil)
g := executables.NewGovc(executable, writer)
snap, err := g.TemplateHasSnapshot(ctx, template)
if err != nil {
t.Fatalf("error getting template snapshot: %v", err)
}
if !snap {
t.Fatalf("Govc.TemplateHasSnapshot() error got = %+v, want %+v", snap, true)
}
}
func TestGovcGetWorkloadAvailableSpace(t *testing.T) {
tests := []struct {
testName string
jsonResponseFile string
wantValue float64
}{
{
testName: "success",
jsonResponseFile: "testdata/govc_no_datastore.json",
wantValue: 1,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
_, writer := test.NewWriter(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
env := govcEnvironment
datastore := "/SDDC-Datacenter/datastore/WorkloadDatastore"
ctx := context.Background()
mockCtrl := gomock.NewController(t)
setupContext(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
params := []string{"datastore.info", "-json=true", datastore}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(*bytes.NewBufferString(fileContent), nil)
g := executables.NewGovc(executable, writer)
freeSpace, err := g.GetWorkloadAvailableSpace(ctx, datastore)
if err != nil {
t.Fatalf("Govc.GetWorkloadAvailableSpace() error: %v", err)
}
if freeSpace != tt.wantValue {
t.Fatalf("Govc.GetWorkloadAvailableSpace() freeSpace = %+v, want %+v", freeSpace, tt.wantValue)
}
})
}
}
func TestDeployTemplateFromLibrarySuccess(t *testing.T) {
tt := newDeployTemplateTest(t)
tt.expectFolderInfoToReturn(nil)
tt.expectDeployToReturn(nil)
tt.expectCreateSnapshotToReturn(nil)
tt.expectMarkAsTemplateToReturn(nil)
tt.assertDeployTemplateSuccess(t)
tt.assertDeployOptsMatches(t)
}
func TestDeployTemplateFromLibraryResizeBRSuccess(t *testing.T) {
tt := newDeployTemplateTest(t)
tt.resizeDisk2 = true
response := map[string][]interface{}{
"Devices": {
map[string]interface{}{
"Name": "disk-31000-0",
"DeviceInfo": map[string]string{
"Label": "Hard disk 1",
},
},
map[string]interface{}{
"Name": "disk-31000-1",
"DeviceInfo": map[string]string{
"Label": "Hard disk 2",
},
},
},
}
	marshaledResponse, err := json.Marshal(response)
	if err != nil {
		t.Fatalf("failed to marshal response: %v", err)
	}
	responseBytes := bytes.NewBuffer(marshaledResponse)
tt.fakeExecResponse = responseBytes
tt.expectFolderInfoToReturn(nil)
tt.expectDeployToReturn(nil)
tt.expectDevicesInfoToReturn(nil)
tt.expectResizeDiskToReturn(nil)
tt.expectCreateSnapshotToReturn(nil)
tt.expectMarkAsTemplateToReturn(nil)
tt.assertDeployTemplateSuccess(t)
tt.assertDeployOptsMatches(t)
}
func TestDeployTemplateFromLibraryErrorDeploy(t *testing.T) {
tt := newDeployTemplateTest(t)
tt.expectFolderInfoToReturn(nil)
tt.expectDeployToReturn(errors.New("error exec"))
tt.assertDeployTemplateError(t)
}
func TestDeployTemplateFromLibraryErrorCreateSnapshot(t *testing.T) {
tt := newDeployTemplateTest(t)
tt.expectFolderInfoToReturn(nil)
tt.expectDeployToReturn(nil)
tt.expectCreateSnapshotToReturn(errors.New("error exec"))
tt.assertDeployTemplateError(t)
}
func TestDeployTemplateFromLibraryErrorMarkAsTemplate(t *testing.T) {
tt := newDeployTemplateTest(t)
tt.expectFolderInfoToReturn(nil)
tt.expectDeployToReturn(nil)
tt.expectCreateSnapshotToReturn(nil)
tt.expectMarkAsTemplateToReturn(errors.New("error exec"))
tt.assertDeployTemplateError(t)
}
func TestGovcValidateVCenterSetupMachineConfig(t *testing.T) {
ctx := context.Background()
ts := newHTTPSServer(t)
datacenterConfig := v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Datacenter: "SDDC Datacenter",
Network: "/SDDC Datacenter/network/test network",
Server: strings.TrimPrefix(ts.URL, "https://"),
Insecure: true,
},
}
machineConfig := v1alpha1.VSphereMachineConfig{
Spec: v1alpha1.VSphereMachineConfigSpec{
Datastore: "/SDDC Datacenter/datastore/testDatastore",
Folder: "/SDDC Datacenter/vm/test",
ResourcePool: "*/Resources/Compute ResourcePool",
},
}
env := govcEnvironment
mockCtrl := gomock.NewController(t)
_, writer := test.NewWriter(t)
selfSigned := true
setupContext(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
var params []string
params = []string{"datastore.info", machineConfig.Spec.Datastore}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(bytes.Buffer{}, nil)
params = []string{"folder.info", machineConfig.Spec.Folder}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(bytes.Buffer{}, nil)
datacenter := "/" + datacenterConfig.Spec.Datacenter
resourcePoolName := "Compute ResourcePool"
params = []string{"find", "-json", datacenter, "-type", "p", "-name", resourcePoolName}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(*bytes.NewBufferString("[\"/SDDC Datacenter/host/Cluster-1/Resources/Compute ResourcePool\"]"), nil)
g := executables.NewGovc(executable, writer)
err := g.ValidateVCenterSetupMachineConfig(ctx, &datacenterConfig, &machineConfig, &selfSigned)
if err != nil {
t.Fatalf("Govc.ValidateVCenterSetup() error: %v", err)
}
}
func newHTTPSServer(t *testing.T) *httptest.Server {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte("ready")); err != nil {
t.Errorf("Failed writing response to http request: %s", err)
}
}))
t.Cleanup(func() { ts.Close() })
return ts
}
func TestGovcCleanupVms(t *testing.T) {
ctx := context.Background()
clusterName := "cluster"
vmName := clusterName
var dryRun bool
env := govcEnvironment
mockCtrl := gomock.NewController(t)
_, writer := test.NewWriter(t)
setupContext(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
var params []string
params = []string{"find", "/" + env[govcDatacenter], "-type", "VirtualMachine", "-name", clusterName + "*"}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(*bytes.NewBufferString(clusterName), nil)
params = []string{"vm.power", "-off", "-force", vmName}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(bytes.Buffer{}, nil)
params = []string{"object.destroy", vmName}
executable.EXPECT().ExecuteWithEnv(ctx, env, params).Return(bytes.Buffer{}, nil)
g := executables.NewGovc(executable, writer)
err := g.CleanupVms(ctx, clusterName, dryRun)
if err != nil {
t.Fatalf("Govc.CleanupVms() error: %v", err)
}
}
func TestCreateLibrarySuccess(t *testing.T) {
datastore := "/SDDC-Datacenter/datastore/WorkloadDatastore"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.create", "-ds", datastore, templateLibrary).Return(*bytes.NewBufferString("testing"), nil)
err := g.CreateLibrary(ctx, datastore, templateLibrary)
if err != nil {
t.Fatalf("Govc.CreateLibrary() err = %v, want err nil", err)
}
}
func TestCreateLibraryError(t *testing.T) {
datastore := "/SDDC-Datacenter/datastore/WorkloadDatastore"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.create", "-ds", datastore, templateLibrary).Return(bytes.Buffer{}, errors.New("error from execute with env"))
err := g.CreateLibrary(ctx, datastore, templateLibrary)
if err == nil {
t.Fatal("Govc.CreateLibrary() err = nil, want err not nil")
}
}
func TestGetTagsSuccessNoTags(t *testing.T) {
path := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.attached.ls", "-json", "-r", path).Return(*bytes.NewBufferString("null"), nil)
tags, err := g.GetTags(ctx, path)
if err != nil {
t.Fatalf("Govc.GetTags() err = %v, want err nil", err)
}
if len(tags) != 0 {
t.Fatalf("Govc.GetTags() tags size = %d, want 0", len(tags))
}
}
func TestGetTagsSuccessHasTags(t *testing.T) {
path := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
ctx := context.Background()
	tagsResponse := `[
"kubernetesChannel:1.19",
"eksd:1.19-4"
]`
wantTags := []string{"kubernetesChannel:1.19", "eksd:1.19-4"}
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.attached.ls", "-json", "-r", path).Return(*bytes.NewBufferString(tagsReponse), nil)
gotTags, err := g.GetTags(ctx, path)
if err != nil {
t.Fatalf("Govc.GetTags() err = %v, want err nil", err)
}
if !reflect.DeepEqual(gotTags, wantTags) {
t.Fatalf("Govc.GetTags() tags %v, want %v", gotTags, wantTags)
}
}
func TestGetTagsErrorGovc(t *testing.T) {
path := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
ctx := context.Background()
_, g, executable, env := setup(t)
g.Retrier = retrier.NewWithMaxRetries(5, 0)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.attached.ls", "-json", "-r", path).Return(bytes.Buffer{}, errors.New("error from exec")).Times(5)
_, err := g.GetTags(ctx, path)
if err == nil {
t.Fatal("Govc.GetTags() err = nil, want err not nil")
}
}
func TestGetTagsErrorUnmarshalling(t *testing.T) {
path := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.attached.ls", "-json", "-r", path).Return(*bytes.NewBufferString("invalid"), nil)
_, err := g.GetTags(ctx, path)
if err == nil {
t.Fatal("Govc.GetTags() err = nil, want err not nil")
}
}
func TestListTagsSuccessNoTags(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.ls", "-json").Return(*bytes.NewBufferString("null"), nil)
tags, err := g.ListTags(ctx)
if err != nil {
t.Fatalf("Govc.ListTags() err = %v, want err nil", err)
}
if len(tags) != 0 {
t.Fatalf("Govc.ListTags() tags size = %d, want 0", len(tags))
}
}
func TestListTagsSuccessHasTags(t *testing.T) {
ctx := context.Background()
	tagsResponse := `[
{
"id": "urn:vmomi:InventoryServiceTag:5555:GLOBAL",
"name": "eksd:1.19-4",
"category_id": "eksd"
},
{
"id": "urn:vmomi:InventoryServiceTag:5555:GLOBAL",
"name": "kubernetesChannel:1.19",
"category_id": "kubernetesChannel"
}
]`
wantTags := []executables.Tag{
{
Name: "eksd:1.19-4",
Id: "urn:vmomi:InventoryServiceTag:5555:GLOBAL",
CategoryId: "eksd",
},
{
Name: "kubernetesChannel:1.19",
Id: "urn:vmomi:InventoryServiceTag:5555:GLOBAL",
CategoryId: "kubernetesChannel",
},
}
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.ls", "-json").Return(*bytes.NewBufferString(tagsReponse), nil)
gotTags, err := g.ListTags(ctx)
if err != nil {
t.Fatalf("Govc.ListTags() err = %v, want err nil", err)
}
if !reflect.DeepEqual(gotTags, wantTags) {
t.Fatalf("Govc.ListTags() tags = %v, want %v", gotTags, wantTags)
}
}
func TestListTagsErrorGovc(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.ls", "-json").Return(bytes.Buffer{}, errors.New("error from exec"))
_, err := g.ListTags(ctx)
if err == nil {
t.Fatal("Govc.ListTags() err = nil, want err not nil")
}
}
func TestListTagsErrorUnmarshalling(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.ls", "-json").Return(*bytes.NewBufferString("invalid"), nil)
_, err := g.ListTags(ctx)
if err == nil {
t.Fatal("Govc.ListTags() err = nil, want err not nil")
}
}
func TestAddTagSuccess(t *testing.T) {
tag := "tag"
path := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.attach", tag, path).Return(*bytes.NewBufferString(""), nil)
err := g.AddTag(ctx, path, tag)
if err != nil {
t.Fatalf("Govc.AddTag() err = %v, want err nil", err)
}
}
func TestEnvMapOverride(t *testing.T) {
category := "category"
tag := "tag"
ctx := context.Background()
envOverride := map[string]string{
govcUsername: "override_vsphere_username",
govcPassword: "override_vsphere_password",
govcURL: "override_vsphere_server",
govcDatacenter: "override_vsphere_datacenter",
govcInsecure: "false",
}
_, g, executable, _ := setup(t, executables.WithGovcEnvMap(envOverride))
executable.EXPECT().ExecuteWithEnv(ctx, envOverride, "tags.create", "-c", category, tag).Return(*bytes.NewBufferString(""), nil)
err := g.CreateTag(ctx, tag, category)
if err != nil {
t.Fatalf("Govc.CreateTag() with envMap override err = %v, want err nil", err)
}
}
func TestAddTagError(t *testing.T) {
tag := "tag"
path := "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.attach", tag, path).Return(bytes.Buffer{}, errors.New("error from execute with env"))
err := g.AddTag(ctx, path, tag)
if err == nil {
t.Fatal("Govc.AddTag() err = nil, want err not nil")
}
}
func TestCreateTagSuccess(t *testing.T) {
category := "category"
tag := "tag"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.create", "-c", category, tag).Return(*bytes.NewBufferString(""), nil)
err := g.CreateTag(ctx, tag, category)
if err != nil {
t.Fatalf("Govc.CreateTag() err = %v, want err nil", err)
}
}
func TestCreateTagError(t *testing.T) {
category := "category"
tag := "tag"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.create", "-c", category, tag).Return(bytes.Buffer{}, errors.New("error from execute with env"))
err := g.CreateTag(ctx, tag, category)
if err == nil {
t.Fatal("Govc.CreateTag() err = nil, want err not nil")
}
}
func TestListCategoriesSuccessNoCategories(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.category.ls", "-json").Return(*bytes.NewBufferString("null"), nil)
gotCategories, err := g.ListCategories(ctx)
if err != nil {
t.Fatalf("Govc.ListCategories() err = %v, want err nil", err)
}
if len(gotCategories) != 0 {
t.Fatalf("Govc.ListCategories() tags size = %d, want 0", len(gotCategories))
}
}
func TestListCategoriesSuccessHasCategories(t *testing.T) {
ctx := context.Background()
catsResponse := `[
{
"id": "urn:vmomi:InventoryServiceCategory:78484:GLOBAL",
"name": "eksd",
"cardinality": "MULTIPLE",
"associable_types": [
"com.vmware.content.library.Item",
"VirtualMachine"
]
},
{
"id": "urn:vmomi:InventoryServiceCategory:78484:GLOBAL",
"name": "kubernetesChannel",
"cardinality": "SINGLE",
"associable_types": [
"VirtualMachine"
]
}
]`
wantCats := []string{"eksd", "kubernetesChannel"}
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.category.ls", "-json").Return(*bytes.NewBufferString(catsResponse), nil)
gotCats, err := g.ListCategories(ctx)
if err != nil {
t.Fatalf("Govc.ListCategories() err = %v, want err nil", err)
}
if !reflect.DeepEqual(gotCats, wantCats) {
t.Fatalf("Govc.ListCategories() tags = %v, want %v", gotCats, wantCats)
}
}
func TestListCategoriesErrorGovc(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.category.ls", "-json").Return(bytes.Buffer{}, errors.New("error from exec"))
_, err := g.ListCategories(ctx)
if err == nil {
t.Fatal("Govc.ListCategories() err = nil, want err not nil")
}
}
func TestListCategoriesErrorUnmarshalling(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.category.ls", "-json").Return(*bytes.NewBufferString("invalid"), nil)
_, err := g.ListCategories(ctx)
if err == nil {
t.Fatal("Govc.ListCategories() err = nil, want err not nil")
}
}
func TestCreateCategoryForVMSuccess(t *testing.T) {
category := "category"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.category.create", "-t", "VirtualMachine", category).Return(*bytes.NewBufferString(""), nil)
err := g.CreateCategoryForVM(ctx, category)
if err != nil {
t.Fatalf("Govc.CreateCategoryForVM() err = %v, want err nil", err)
}
}
func TestCreateCategoryForVMError(t *testing.T) {
category := "category"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "tags.category.create", "-t", "VirtualMachine", category).Return(bytes.Buffer{}, errors.New("error from execute with env"))
err := g.CreateCategoryForVM(ctx, category)
if err == nil {
t.Fatal("Govc.CreateCategoryForVM() err = nil, want err not nil")
}
}
func TestImportTemplateSuccess(t *testing.T) {
ovaURL := "ovaURL"
name := "name"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.import", "-k", "-pull", "-n", name, templateLibrary, ovaURL).Return(*bytes.NewBufferString(""), nil)
if err := g.ImportTemplate(ctx, templateLibrary, ovaURL, name); err != nil {
t.Fatalf("Govc.ImportTemplate() err = %v, want err nil", err)
}
}
func TestImportTemplateError(t *testing.T) {
ovaURL := "ovaURL"
name := "name"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "library.import", "-k", "-pull", "-n", name, templateLibrary, ovaURL).Return(bytes.Buffer{}, errors.New("error from execute with env"))
if err := g.ImportTemplate(ctx, templateLibrary, ovaURL, name); err == nil {
t.Fatal("Govc.ImportTemplate() err = nil, want err not nil")
}
}
func TestDeleteTemplateSuccess(t *testing.T) {
template := "template"
resourcePool := "resourcePool"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "vm.markasvm", "-pool", resourcePool, template).Return(*bytes.NewBufferString(""), nil)
executable.EXPECT().ExecuteWithEnv(ctx, env, "snapshot.remove", "-vm", template, "*").Return(*bytes.NewBufferString(""), nil)
executable.EXPECT().ExecuteWithEnv(ctx, env, "vm.destroy", template).Return(*bytes.NewBufferString(""), nil)
if err := g.DeleteTemplate(ctx, resourcePool, template); err != nil {
t.Fatalf("Govc.DeleteTemplate() err = %v, want err nil", err)
}
}
func TestDeleteTemplateMarkAsVMError(t *testing.T) {
template := "template"
resourcePool := "resourcePool"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "vm.markasvm", "-pool", resourcePool, template).Return(bytes.Buffer{}, errors.New("error from execute with env"))
if err := g.DeleteTemplate(ctx, resourcePool, template); err == nil {
t.Fatal("Govc.DeleteTemplate() err = nil, want err not nil")
}
}
func TestDeleteTemplateRemoveSnapshotError(t *testing.T) {
template := "template"
resourcePool := "resourcePool"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "vm.markasvm", "-pool", resourcePool, template).Return(*bytes.NewBufferString(""), nil)
executable.EXPECT().ExecuteWithEnv(ctx, env, "snapshot.remove", "-vm", template, "*").Return(bytes.Buffer{}, errors.New("error from execute with env"))
if err := g.DeleteTemplate(ctx, resourcePool, template); err == nil {
t.Fatal("Govc.DeleteTemplate() err = nil, want err not nil")
}
}
func TestDeleteTemplateDeleteVMError(t *testing.T) {
template := "template"
resourcePool := "resourcePool"
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "vm.markasvm", "-pool", resourcePool, template).Return(*bytes.NewBufferString(""), nil)
executable.EXPECT().ExecuteWithEnv(ctx, env, "snapshot.remove", "-vm", template, "*").Return(*bytes.NewBufferString(""), nil)
executable.EXPECT().ExecuteWithEnv(ctx, env, "vm.destroy", template).Return(bytes.Buffer{}, errors.New("error from execute with env"))
if err := g.DeleteTemplate(ctx, resourcePool, template); err == nil {
t.Fatal("Govc.DeleteTemplate() err = nil, want err not nil")
}
}
func TestGovcLogoutSuccess(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "session.logout").Return(*bytes.NewBufferString(""), nil)
executable.EXPECT().ExecuteWithEnv(ctx, env, "session.logout", "-k").Return(*bytes.NewBufferString(""), nil)
if err := g.Logout(ctx); err != nil {
t.Fatalf("Govc.Logout() err = %v, want err nil", err)
}
}
func TestGovcValidateVCenterConnectionSuccess(t *testing.T) {
ctx := context.Background()
ts := newHTTPSServer(t)
_, g, _, _ := setup(t)
if err := g.ValidateVCenterConnection(ctx, strings.TrimPrefix(ts.URL, "https://")); err != nil {
t.Fatalf("Govc.ValidateVCenterConnection() err = %v, want err nil", err)
}
}
func TestGovcValidateVCenterAuthenticationSuccess(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "about", "-k").Return(*bytes.NewBufferString(""), nil)
if err := g.ValidateVCenterAuthentication(ctx); err != nil {
t.Fatalf("Govc.ValidateVCenterAuthentication() err = %v, want err nil", err)
}
}
func TestGovcValidateVCenterAuthenticationErrorNoDatacenter(t *testing.T) {
ctx := context.Background()
_, g, _, _ := setup(t)
t.Setenv(govcDatacenter, "")
if err := g.ValidateVCenterAuthentication(ctx); err == nil {
t.Fatal("Govc.ValidateVCenterAuthentication() err = nil, want err not nil")
}
}
func TestGovcIsCertSelfSignedTrue(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "about").Return(*bytes.NewBufferString(""), errors.New(""))
if !g.IsCertSelfSigned(ctx) {
t.Fatalf("Govc.IsCertSelfSigned) = false, want true")
}
}
func TestGovcIsCertSelfSignedFalse(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
executable.EXPECT().ExecuteWithEnv(ctx, env, "about").Return(*bytes.NewBufferString(""), nil)
if g.IsCertSelfSigned(ctx) {
t.Fatalf("Govc.IsCertSelfSigned) = true, want false")
}
}
func TestGovcGetCertThumbprintSuccess(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
wantThumbprint := "AB:AB:AB"
executable.EXPECT().ExecuteWithEnv(ctx, env, "about.cert", "-thumbprint", "-k").Return(*bytes.NewBufferString("server.com AB:AB:AB"), nil)
gotThumbprint, err := g.GetCertThumbprint(ctx)
if err != nil {
t.Fatalf("Govc.GetCertThumbprint() err = %v, want err nil", err)
}
if gotThumbprint != wantThumbprint {
t.Fatalf("Govc.GetCertThumbprint() thumbprint = %s, want %s", gotThumbprint, wantThumbprint)
}
}
func TestGovcGetCertThumbprintBadOutput(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
wantErr := "invalid thumbprint format"
executable.EXPECT().ExecuteWithEnv(ctx, env, "about.cert", "-thumbprint", "-k").Return(*bytes.NewBufferString("server.comAB:AB:AB"), nil)
if _, err := g.GetCertThumbprint(ctx); err == nil || err.Error() != wantErr {
t.Fatalf("Govc.GetCertThumbprint() err = %s, want err %s", err, wantErr)
}
}
func TestGovcConfigureCertThumbprint(t *testing.T) {
ctx := context.Background()
_, g, _, _ := setup(t)
server := "server.com"
thumbprint := "AB:AB:AB"
wantKnownHostsContent := "server.com AB:AB:AB"
if err := g.ConfigureCertThumbprint(ctx, server, thumbprint); err != nil {
t.Fatalf("Govc.ConfigureCertThumbprint() err = %v, want err nil", err)
}
path, ok := os.LookupEnv("GOVC_TLS_KNOWN_HOSTS")
if !ok {
t.Fatal("GOVC_TLS_KNOWN_HOSTS is not set")
}
gotKnownHostsContent := test.ReadFile(t, path)
if gotKnownHostsContent != wantKnownHostsContent {
t.Fatalf("GOVC_TLS_KNOWN_HOSTS file content = %s, want %s", gotKnownHostsContent, wantKnownHostsContent)
}
}
func TestGovcDatacenterExistsTrue(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
datacenter := "datacenter_1"
executable.EXPECT().ExecuteWithEnv(ctx, env, "datacenter.info", datacenter).Return(*bytes.NewBufferString(""), nil)
exists, err := g.DatacenterExists(ctx, datacenter)
if err != nil {
t.Fatalf("Govc.DatacenterExists() err = %v, want err nil", err)
}
if !exists {
t.Fatalf("Govc.DatacenterExists() = false, want true")
}
}
func TestGovcDatacenterExistsFalse(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
datacenter := "datacenter_1"
executable.EXPECT().ExecuteWithEnv(ctx, env, "datacenter.info", datacenter).Return(*bytes.NewBufferString("datacenter_1 not found"), errors.New("exit code 1"))
exists, err := g.DatacenterExists(ctx, datacenter)
if err != nil {
t.Fatalf("Govc.DatacenterExists() err = %v, want err nil", err)
}
if exists {
t.Fatalf("Govc.DatacenterExists() = true, want false")
}
}
func TestGovcNetworkExistsTrue(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
network := "/Networks/network_1"
networkName := "network_1"
networkDir := "/Networks"
executable.EXPECT().ExecuteWithEnv(ctx, env, "find", "-maxdepth=1", networkDir, "-type", "n", "-name", networkName).Return(*bytes.NewBufferString(network), nil)
exists, err := g.NetworkExists(ctx, network)
if err != nil {
t.Fatalf("Govc.NetworkExists() err = %v, want err nil", err)
}
if !exists {
t.Fatalf("Govc.NetworkExists() = false, want true")
}
}
func TestGovcNetworkExistsFalse(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
network := "/Networks/network_1"
networkName := "network_1"
networkDir := "/Networks"
executable.EXPECT().ExecuteWithEnv(ctx, env, "find", "-maxdepth=1", networkDir, "-type", "n", "-name", networkName).Return(*bytes.NewBufferString(""), nil)
exists, err := g.NetworkExists(ctx, network)
if err != nil {
t.Fatalf("Govc.NetworkExistsNetworkExists() err = %v, want err nil", err)
}
if exists {
t.Fatalf("Govc.NetworkExists() = true, want false")
}
}
func TestGovcCreateUser(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
username := "ralph"
password := "verysecret"
tests := []struct {
name string
wantErr error
}{
{
name: "test CreateGroup success",
wantErr: nil,
},
{
name: "test CreateGroup error",
wantErr: errors.New("operation failed"),
},
}
for _, tt := range tests {
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.user.create", "-p", password, username).Return(*bytes.NewBufferString(""), tt.wantErr)
err := g.CreateUser(ctx, username, password)
gt := NewWithT(t)
if tt.wantErr != nil {
gt.Expect(err).ToNot(BeNil())
} else {
gt.Expect(err).To(BeNil())
}
}
}
func TestGovcCreateGroup(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
group := "EKSA"
tests := []struct {
name string
wantErr error
}{
{
name: "test CreateGroup success",
wantErr: nil,
},
{
name: "test CreateGroup error",
wantErr: errors.New("operation failed"),
},
}
for _, tt := range tests {
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.group.create", group).Return(*bytes.NewBufferString(""), tt.wantErr)
err := g.CreateGroup(ctx, group)
gt := NewWithT(t)
if tt.wantErr != nil {
gt.Expect(err).ToNot(BeNil())
} else {
gt.Expect(err).To(BeNil())
}
}
}
func TestGovcUserExistsFalse(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
username := "eksa"
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.user.ls", username).Return(*bytes.NewBufferString(""), nil)
exists, err := g.UserExists(ctx, username)
gt := NewWithT(t)
gt.Expect(err).To(BeNil())
gt.Expect(exists).To(BeFalse())
}
func TestGovcUserExistsTrue(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
username := "eksa"
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.user.ls", username).Return(*bytes.NewBufferString(username), nil)
exists, err := g.UserExists(ctx, username)
gt := NewWithT(t)
gt.Expect(err).To(BeNil())
gt.Expect(exists).To(BeTrue())
}
func TestGovcUserExistsError(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
username := "eksa"
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.user.ls", username).Return(*bytes.NewBufferString(""), errors.New("operation failed"))
_, err := g.UserExists(ctx, username)
gt := NewWithT(t)
gt.Expect(err).ToNot(BeNil())
}
func TestGovcCreateRole(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
role := "EKSACloudAdmin"
privileges := []string{"vSphereDataProtection.Recovery", "vSphereDataProtection.Protection"}
tests := []struct {
name string
wantErr error
}{
{
name: "test CreateRole success",
wantErr: nil,
},
{
name: "test CreateRole error",
wantErr: errors.New("operation failed"),
},
}
for _, tt := range tests {
targetArgs := append([]string{"role.create", role}, privileges...)
executable.EXPECT().ExecuteWithEnv(ctx, env, targetArgs).Return(*bytes.NewBufferString(""), tt.wantErr)
err := g.CreateRole(ctx, role, privileges)
gt := NewWithT(t)
if tt.wantErr != nil {
gt.Expect(err).ToNot(BeNil())
} else {
gt.Expect(err).To(BeNil())
}
}
}
func TestGovcGroupExistsFalse(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
group := "EKSA"
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.group.ls", group).Return(*bytes.NewBufferString(""), nil)
exists, err := g.GroupExists(ctx, group)
gt := NewWithT(t)
gt.Expect(err).To(BeNil())
gt.Expect(exists).To(BeFalse())
}
func TestGovcGroupExistsTrue(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
group := "EKSA"
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.group.ls", group).Return(*bytes.NewBufferString(group), nil)
exists, err := g.GroupExists(ctx, group)
gt := NewWithT(t)
gt.Expect(err).To(BeNil())
gt.Expect(exists).To(BeTrue())
}
func TestGovcGroupExistsError(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
group := "EKSA"
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.group.ls", group).Return(*bytes.NewBufferString(""), errors.New("operation failed"))
_, err := g.GroupExists(ctx, group)
gt := NewWithT(t)
gt.Expect(err).ToNot(BeNil())
}
func TestGovcRoleExistsTrue(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
role := "EKSACloudAdmin"
executable.EXPECT().ExecuteWithEnv(ctx, env, "role.ls", role).Return(*bytes.NewBufferString(role), nil)
exists, err := g.RoleExists(ctx, role)
gt := NewWithT(t)
gt.Expect(err).To(BeNil())
gt.Expect(exists).To(BeTrue())
}
func TestGovcRoleExistsFalse(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
role := "EKSACloudAdmin"
executable.EXPECT().ExecuteWithEnv(ctx, env, "role.ls", role).Return(*bytes.NewBufferString(""), fmt.Errorf("role \"%s\" not found", role))
exists, err := g.RoleExists(ctx, role)
gt := NewWithT(t)
gt.Expect(err).To(BeNil())
gt.Expect(exists).To(BeFalse())
}
func TestGovcRoleExistsError(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
role := "EKSACloudAdmin"
executable.EXPECT().ExecuteWithEnv(ctx, env, "role.ls", role).Return(*bytes.NewBufferString(""), errors.New("operation failed"))
_, err := g.RoleExists(ctx, role)
gt := NewWithT(t)
gt.Expect(err).ToNot(BeNil())
}
func TestGovcAddUserToGroup(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
group := "EKSA"
username := "ralph"
tests := []struct {
name string
wantErr error
}{
{
name: "test AddUserToGroup success",
wantErr: nil,
},
{
name: "test AddUserToGroup error",
wantErr: errors.New("operation failed"),
},
}
for _, tt := range tests {
executable.EXPECT().ExecuteWithEnv(ctx, env, "sso.group.update", "-a", username, group).Return(*bytes.NewBufferString(""), tt.wantErr)
err := g.AddUserToGroup(ctx, group, username)
gt := NewWithT(t)
if tt.wantErr != nil {
gt.Expect(err).ToNot(BeNil())
} else {
gt.Expect(err).To(BeNil())
}
}
}
func TestGovcSetGroupRoleOnObject(t *testing.T) {
ctx := context.Background()
_, g, executable, env := setup(t)
principal := "EKSAGroup"
domain := "vsphere.local"
role := "EKSACloudAdmin"
object := "/Datacenter/vm/MyVirtualMachines"
tests := []struct {
name string
wantErr error
}{
{
name: "test SetGroupRoleOnObject success",
wantErr: nil,
},
{
name: "test SetGroupRoleOnObject error",
wantErr: errors.New("operation failed"),
},
}
for _, tt := range tests {
executable.EXPECT().ExecuteWithEnv(
ctx,
env,
"permissions.set",
"-group=true",
"-principal",
principal+"@"+domain,
"-role",
role,
object,
).Return(*bytes.NewBufferString(""), tt.wantErr)
err := g.SetGroupRoleOnObject(ctx, principal, role, object, domain)
gt := NewWithT(t)
if tt.wantErr != nil {
gt.Expect(err).ToNot(BeNil())
} else {
gt.Expect(err).To(BeNil())
}
}
}
func TestGovcGetVMDiskSizeInGB(t *testing.T) {
datacenter := "SDDC-Datacenter"
template := "bottlerocket-kube-v1.24.6"
ctx := context.Background()
_, g, executable, env := setup(t)
gt := NewWithT(t)
response := map[string][]interface{}{
"Devices": {
map[string]interface{}{
"Name": "disk-31000-0",
"DeviceInfo": map[string]string{
"Label": "Hard disk 1",
},
"CapacityInKB": 25 * 1024 * 1024, // 25GB in KB
},
},
}
marshaledResponse, err := json.Marshal(response)
if err != nil {
t.Fatalf("failed to marshal response: %v", err)
}
responseBytes := bytes.NewBuffer(marshaledResponse)
executable.EXPECT().ExecuteWithEnv(ctx, env, "device.info", "-dc", datacenter, "-vm", template, "-json", "disk-*").Return(*responseBytes, nil)
size, err := g.GetVMDiskSizeInGB(ctx, template, datacenter)
gt.Expect(err).To(BeNil())
gt.Expect(size).To(Equal(25))
}
func TestGovcGetVMDiskSizeInGBError(t *testing.T) {
datacenter := "SDDC-Datacenter"
template := "bottlerocket-kube-v1.24.6"
ctx := context.Background()
_, g, executable, env := setup(t)
govcErr := errors.New("error DevicesInfo()")
tests := []struct {
testName string
response map[string][]interface{}
govcErr error
wantErr error
}{
{
testName: "devices_info_govc_error",
response: nil,
govcErr: govcErr,
wantErr: fmt.Errorf("getting disk size for vm %s: getting template device information: %v", template, govcErr),
},
{
testName: "devices_info_no_devices",
response: map[string][]interface{}{
"Devices": {},
},
govcErr: nil,
wantErr: fmt.Errorf("no disks found for vm %s", template),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
gt := NewWithT(t)
marshaledResponse, err := json.Marshal(tt.response)
if err != nil {
t.Fatalf("failed to marshal response: %v", err)
}
responseBytes := bytes.NewBuffer(marshaledResponse)
executable.EXPECT().ExecuteWithEnv(ctx, env, "device.info", "-dc", datacenter, "-vm", template, "-json", "disk-*").Return(*responseBytes, tt.govcErr)
_, err = g.GetVMDiskSizeInGB(ctx, template, datacenter)
gt.Expect(err.Error()).To(Equal(tt.wantErr.Error()))
})
}
}
func TestGovcGetHardDiskSize(t *testing.T) {
datacenter := "SDDC-Datacenter"
template := "bottlerocket-kube-v1-21"
ctx := context.Background()
wantDiskMap := map[string]float64{
"Hard disk 1": 2097152,
"Hard disk 2": 20971520,
}
_, g, executable, env := setup(t)
gt := NewWithT(t)
response := map[string][]interface{}{
"Devices": {
map[string]interface{}{
"Name": "disk-31000-0",
"DeviceInfo": map[string]string{
"Label": "Hard disk 1",
},
"CapacityInKB": 2097152,
}, map[string]interface{}{
"Name": "disk-31000-1",
"DeviceInfo": map[string]string{
"Label": "Hard disk 2",
},
"CapacityInKB": 20971520,
},
},
}
marshaledResponse, err := json.Marshal(response)
if err != nil {
t.Fatalf("failed to marshal response: %v", err)
}
responseBytes := bytes.NewBuffer(marshaledResponse)
executable.EXPECT().ExecuteWithEnv(ctx, env, "device.info", "-dc", datacenter, "-vm", template, "-json", "disk-*").Return(*responseBytes, nil)
diskSizeMap, err := g.GetHardDiskSize(ctx, template, datacenter)
gt.Expect(err).To(BeNil())
gt.Expect(diskSizeMap).To(Equal(wantDiskMap))
}
func TestGovcGetHardDiskSizeError(t *testing.T) {
datacenter := "SDDC-Datacenter"
template := "bottlerocket-kube-v1-21"
ctx := context.Background()
_, g, executable, env := setup(t)
govcErr := errors.New("error DevicesInfo()")
tests := []struct {
testName string
response map[string][]interface{}
govcErr error
wantErr error
}{
{
testName: "devices_info_govc_error",
response: nil,
govcErr: govcErr,
wantErr: fmt.Errorf("getting hard disk sizes for vm %s: getting template device information: %v", template, govcErr),
},
{
testName: "devices_info_no_devices",
response: map[string][]interface{}{
"Devices": {},
},
govcErr: nil,
wantErr: fmt.Errorf("no hard disks found for vm %s", template),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
gt := NewWithT(t)
marshaledResponse, err := json.Marshal(tt.response)
if err != nil {
t.Fatalf("failed to marshal response: %v", err)
}
responseBytes := bytes.NewBuffer(marshaledResponse)
executable.EXPECT().ExecuteWithEnv(ctx, env, "device.info", "-dc", datacenter, "-vm", template, "-json", "disk-*").Return(*responseBytes, tt.govcErr)
_, err = g.GetHardDiskSize(ctx, template, datacenter)
gt.Expect(err.Error()).To(Equal(tt.wantErr.Error()))
})
}
}
| 1,646 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
"strings"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/registrymirror"
)
const (
helmPath = "helm"
insecureSkipVerifyFlag = "--insecure-skip-tls-verify"
)
type Helm struct {
executable Executable
registryMirror *registrymirror.RegistryMirror
env map[string]string
insecure bool
}
type HelmOpt func(*Helm)
// WithRegistryMirror sets up a registry mirror for helm.
func WithRegistryMirror(mirror *registrymirror.RegistryMirror) HelmOpt {
return func(h *Helm) {
h.registryMirror = mirror
}
}
// WithInsecure configures helm to skip TLS certificate verification
// when pulling charts from or pushing charts to a registry.
func WithInsecure() HelmOpt {
return func(h *Helm) {
h.insecure = true
}
}
// WithEnv merges the provided map into helm's default environment variables, with the provided values taking precedence.
func WithEnv(env map[string]string) HelmOpt {
return func(h *Helm) {
for k, v := range env {
h.env[k] = v
}
}
}
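// NewHelm returns a Helm executable wrapper with OCI support enabled by default and the given options applied.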
func NewHelm(executable Executable, opts ...HelmOpt) *Helm {
h := &Helm{
executable: executable,
env: map[string]string{
"HELM_EXPERIMENTAL_OCI": "1",
},
insecure: false,
}
for _, o := range opts {
o(h)
}
return h
}
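// Template renders the chart at the given OCI URI with the provided values, passing them to helm on stdin, and returns the rendered manifests.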
func (h *Helm) Template(ctx context.Context, ociURI, version, namespace string, values interface{}, kubeVersion string) ([]byte, error) {
valuesYaml, err := yaml.Marshal(values)
if err != nil {
return nil, fmt.Errorf("failed marshalling values for helm template: %v", err)
}
params := []string{"template", h.url(ociURI), "--version", version, "--namespace", namespace, "--kube-version", kubeVersion}
params = h.addInsecureFlagIfProvided(params)
params = append(params, "-f", "-")
result, err := h.executable.Command(ctx, params...).WithStdIn(valuesYaml).WithEnvVars(h.env).Run()
if err != nil {
return nil, err
}
return result.Bytes(), nil
}
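// PullChart downloads the chart at the given OCI URI and version.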
func (h *Helm) PullChart(ctx context.Context, ociURI, version string) error {
params := []string{"pull", h.url(ociURI), "--version", version}
params = h.addInsecureFlagIfProvided(params)
_, err := h.executable.Command(ctx, params...).
WithEnvVars(h.env).Run()
return err
}
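// PushChart uploads the given packaged chart to the registry.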
func (h *Helm) PushChart(ctx context.Context, chart, registry string) error {
logger.Info("Pushing", "chart", chart)
params := []string{"push", chart, registry}
params = h.addInsecureFlagIfProvided(params)
_, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
return err
}
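// RegistryLogin authenticates against the given registry, passing the password to helm on stdin so it never appears in the process list.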
func (h *Helm) RegistryLogin(ctx context.Context, registry, username, password string) error {
logger.Info("Logging in to helm registry", "registry", registry)
params := []string{"registry", "login", registry, "--username", username, "--password-stdin"}
if h.insecure {
params = append(params, "--insecure")
}
_, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).WithStdIn([]byte(password)).Run()
return err
}
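// SaveChart downloads the chart at the given OCI URI and version into the destination folder.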
func (h *Helm) SaveChart(ctx context.Context, ociURI, version, folder string) error {
params := []string{"pull", h.url(ociURI), "--version", version, "--destination", folder}
params = h.addInsecureFlagIfProvided(params)
_, err := h.executable.Command(ctx, params...).
WithEnvVars(h.env).Run()
return err
}
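// InstallChartFromName installs (or upgrades) the chart at ociURI as release "name" into the cluster pointed to by kubeConfig.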
func (h *Helm) InstallChartFromName(ctx context.Context, ociURI, kubeConfig, name, version string) error {
// Using "upgrade --install" installs the chart if it isn't present and
// upgrades it otherwise, making the operation idempotent. Plain "install"
// would error out if the chart is already installed, and has no equivalent
// "--upgrade" flag.
params := []string{"upgrade", "--install", name, ociURI, "--version", version, "--kubeconfig", kubeConfig}
params = h.addInsecureFlagIfProvided(params)
_, err := h.executable.Command(ctx, params...).
WithEnvVars(h.env).Run()
return err
}
// InstallChart installs a helm chart to the target cluster.
//
// If kubeconfigFilePath is the empty string, it won't be passed at all.
func (h *Helm) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error {
valueArgs := GetHelmValueArgs(values)
params := []string{"upgrade", "--install", chart, ociURI, "--version", version}
if skipCRDs {
params = append(params, "--skip-crds")
}
params = append(params, valueArgs...)
if kubeconfigFilePath != "" {
params = append(params, "--kubeconfig", kubeconfigFilePath)
}
if len(namespace) > 0 {
params = append(params, "--create-namespace", "--namespace", namespace)
}
if valueFilePath != "" {
params = append(params, "-f", valueFilePath)
}
params = h.addInsecureFlagIfProvided(params)
logger.Info("Installing helm chart on cluster", "chart", chart, "version", version)
_, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
return err
}
// InstallChartWithValuesFile installs a helm chart with the provided values file and waits for the chart deployment to be ready.
// The default timeout for the chart to reach ready state is 5m.
func (h *Helm) InstallChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string) error {
params := []string{"upgrade", "--install", chart, ociURI, "--version", version, "--values", valuesFilePath, "--kubeconfig", kubeconfigFilePath, "--wait"}
params = h.addInsecureFlagIfProvided(params)
_, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
return err
}
// Delete removes an installation.
func (h *Helm) Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error {
params := []string{
"delete", installName,
"--kubeconfig", kubeconfigFilePath,
}
if namespace != "" {
params = append(params, "--namespace", namespace)
}
params = h.addInsecureFlagIfProvided(params)
if _, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run(); err != nil {
return fmt.Errorf("deleting helm installation %w", err)
}
logger.V(6).Info("Deleted helm installation", "name", installName, "namespace", namespace)
return nil
}
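// ListCharts returns the names of the releases installed in the cluster pointed to by kubeconfigFilePath.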
func (h *Helm) ListCharts(ctx context.Context, kubeconfigFilePath string) ([]string, error) {
params := []string{"list", "-q", "--kubeconfig", kubeconfigFilePath}
out, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
if err != nil {
return nil, err
}
charts := strings.FieldsFunc(out.String(), func(c rune) bool {
return c == '\n'
})
return charts, nil
}
func (h *Helm) addInsecureFlagIfProvided(params []string) []string {
if h.insecure {
return append(params, insecureSkipVerifyFlag)
}
return params
}
func (h *Helm) url(originalURL string) string {
return h.registryMirror.ReplaceRegistry(originalURL)
}
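// GetHelmValueArgs converts a list of key=value overrides into the repeated --set flags helm expects.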
func GetHelmValueArgs(values []string) []string {
valueArgs := []string{}
for _, value := range values {
valueArgs = append(valueArgs, "--set", value)
}
return valueArgs
}
// UpgradeChartWithValuesFile runs a helm upgrade with the provided values file and waits for the
// chart deployment to be ready.
func (h *Helm) UpgradeChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string, opts ...HelmOpt) error {
params := []string{
"upgrade", chart, ociURI,
"--version", version,
"--values", valuesFilePath,
"--kubeconfig", kubeconfigFilePath,
"--wait",
}
for _, opt := range opts {
opt(h)
}
params = h.addInsecureFlagIfProvided(params)
_, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
return err
}
| 235 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/registrymirror"
)
type helmTest struct {
*WithT
ctx context.Context
h *executables.Helm
e *mocks.MockExecutable
envVars map[string]string
}
func newHelmTest(t *testing.T, opts ...executables.HelmOpt) *helmTest {
ctrl := gomock.NewController(t)
e := mocks.NewMockExecutable(ctrl)
return &helmTest{
WithT: NewWithT(t),
ctx: context.Background(),
h: executables.NewHelm(e, opts...),
e: e,
envVars: map[string]string{
"HELM_EXPERIMENTAL_OCI": "1",
},
}
}
type helmTemplateTest struct {
*helmTest
values interface{}
valuesYaml []byte
ociURI, version, namespace string
wantTemplateContent []byte
}
func newHelmTemplateTest(t *testing.T, opts ...executables.HelmOpt) *helmTemplateTest {
return &helmTemplateTest{
helmTest: newHelmTest(t, opts...),
values: map[string]string{
"key1": "values1",
"key2": "values2",
},
valuesYaml: []byte(`key1: values1
key2: values2
`,
),
ociURI: "oci://public.ecr.aws/account/charts",
version: "1.1.1",
namespace: "kube-system",
wantTemplateContent: []byte("template-content"),
}
}
func TestHelmTemplateSuccess(t *testing.T) {
tt := newHelmTemplateTest(t)
expectCommand(
tt.e, tt.ctx, "template", tt.ociURI, "--version", tt.version, "--namespace", tt.namespace, "--kube-version", "1.22", "-f", "-",
).withStdIn(tt.valuesYaml).withEnvVars(tt.envVars).to().Return(*bytes.NewBuffer(tt.wantTemplateContent), nil)
tt.Expect(tt.h.Template(tt.ctx, tt.ociURI, tt.version, tt.namespace, tt.values, "1.22")).To(Equal(tt.wantTemplateContent), "helm.Template() should succeed and return the correct template content")
}
func TestHelmTemplateSuccessWithInsecure(t *testing.T) {
tt := newHelmTemplateTest(t, executables.WithInsecure())
expectCommand(
tt.e, tt.ctx, "template", tt.ociURI, "--version", tt.version, "--namespace", tt.namespace, "--kube-version", "1.22", "--insecure-skip-tls-verify", "-f", "-",
).withStdIn(tt.valuesYaml).withEnvVars(tt.envVars).to().Return(*bytes.NewBuffer(tt.wantTemplateContent), nil)
tt.Expect(tt.h.Template(tt.ctx, tt.ociURI, tt.version, tt.namespace, tt.values, "1.22")).To(Equal(tt.wantTemplateContent), "helm.Template() should succeed and return the correct template content")
}
func TestHelmTemplateSuccessWithRegistryMirror(t *testing.T) {
tt := newHelmTemplateTest(t, executables.WithRegistryMirror(®istrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
}))
ociRegistryMirror := "oci://1.2.3.4:443/account/charts"
expectCommand(
tt.e, tt.ctx, "template", ociRegistryMirror, "--version", tt.version, "--namespace", tt.namespace, "--kube-version", "1.22", "-f", "-",
).withStdIn(tt.valuesYaml).withEnvVars(tt.envVars).to().Return(*bytes.NewBuffer(tt.wantTemplateContent), nil)
tt.Expect(tt.h.Template(tt.ctx, ociRegistryMirror, tt.version, tt.namespace, tt.values, "1.22")).To(Equal(tt.wantTemplateContent), "helm.Template() should succeed and return the correct template content")
}
func TestHelmTemplateSuccessWithEnv(t *testing.T) {
tt := newHelmTemplateTest(t, executables.WithEnv(map[string]string{
"HTTPS_PROXY": "test1",
}))
expectedEnv := map[string]string{
"HTTPS_PROXY": "test1",
"HELM_EXPERIMENTAL_OCI": "1",
}
expectCommand(
tt.e, tt.ctx, "template", tt.ociURI, "--version", tt.version, "--namespace", tt.namespace, "--kube-version", "1.22", "-f", "-",
).withStdIn(tt.valuesYaml).withEnvVars(expectedEnv).to().Return(*bytes.NewBuffer(tt.wantTemplateContent), nil)
tt.Expect(tt.h.Template(tt.ctx, tt.ociURI, tt.version, tt.namespace, tt.values, "1.22")).To(Equal(tt.wantTemplateContent), "helm.Template() should succeed and return the correct template content")
}
func TestHelmTemplateErrorYaml(t *testing.T) {
tt := newHelmTemplateTest(t)
values := func() {}
_, gotErr := tt.h.Template(tt.ctx, tt.ociURI, tt.version, tt.namespace, values, "1.22")
tt.Expect(gotErr).To(HaveOccurred(), "helm.Template() should fail marshalling values to yaml")
tt.Expect(gotErr).To(MatchError(ContainSubstring("failed marshalling values for helm template: error marshaling into JSON")))
}
func TestHelmSaveChartSuccess(t *testing.T) {
tt := newHelmTest(t)
url := "url"
version := "1.1"
destinationFolder := "folder"
expectCommand(
tt.e, tt.ctx, "pull", url, "--version", version, "--destination", destinationFolder,
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.SaveChart(tt.ctx, url, version, destinationFolder)).To(Succeed())
}
func TestHelmSaveChartSuccessWithInsecure(t *testing.T) {
tt := newHelmTest(t, executables.WithInsecure())
url := "url"
version := "1.1"
destinationFolder := "folder"
expectCommand(
tt.e, tt.ctx, "pull", url, "--version", version, "--destination", destinationFolder, "--insecure-skip-tls-verify",
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.SaveChart(tt.ctx, url, version, destinationFolder)).To(Succeed())
}
func TestHelmSkipCRDs(t *testing.T) {
tt := newHelmTest(t)
url := "url"
version := "1.1"
kubeconfig := "kubeconfig"
chart := "chart"
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--skip-crds", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", constants.EksaPackagesName,
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, constants.EksaPackagesName, "", true, nil)).To(Succeed())
}
func TestHelmInstallChartSuccess(t *testing.T) {
tt := newHelmTest(t)
chart := "chart"
url := "url"
version := "1.1"
kubeconfig := "/root/.kube/config"
values := []string{"key1=value1"}
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages",
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", "", false, values)).To(Succeed())
}
func TestHelmInstallChartSuccessWithValuesFile(t *testing.T) {
tt := newHelmTest(t)
chart := "chart"
url := "url"
version := "1.1"
kubeconfig := "/root/.kube/config"
values := []string{"key1=value1"}
valuesFileName := "values.yaml"
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "-f", valuesFileName,
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", valuesFileName, false, values)).To(Succeed())
}
func TestHelmInstallChartSuccessWithInsecure(t *testing.T) {
tt := newHelmTest(t, executables.WithInsecure())
chart := "chart"
url := "url"
version := "1.1"
kubeconfig := "/root/.kube/config"
values := []string{"key1=value1"}
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "--insecure-skip-tls-verify",
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", "", false, values)).To(Succeed())
}
func TestHelmInstallChartSuccessWithInsecureAndValuesFile(t *testing.T) {
tt := newHelmTest(t, executables.WithInsecure())
chart := "chart"
url := "url"
version := "1.1"
kubeconfig := "/root/.kube/config"
values := []string{"key1=value1"}
valuesFileName := "values.yaml"
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "-f", valuesFileName, "--insecure-skip-tls-verify",
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", valuesFileName, false, values)).To(Succeed())
}
func TestHelmGetValueArgs(t *testing.T) {
tests := []struct {
testName string
values []string
wantValuesArgs []string
}{
{
testName: "single Helm value override",
values: []string{"key1=value1"},
wantValuesArgs: []string{"--set", "key1=value1"},
},
{
testName: "multiple Helm value overrides",
values: []string{"key1=value1", "key2=value2", "key3=value3"},
wantValuesArgs: []string{"--set", "key1=value1", "--set", "key2=value2", "--set", "key3=value3"},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if gotValuesArgs := executables.GetHelmValueArgs(tt.values); !sliceEqual(gotValuesArgs, tt.wantValuesArgs) {
t.Errorf("GetHelmValueArgs() = %v, want %v", gotValuesArgs, tt.wantValuesArgs)
}
})
}
}
func TestHelmInstallChartWithValuesFileSuccess(t *testing.T) {
tt := newHelmTest(t)
chart := "chart"
url := "url"
version := "1.1"
kubeconfig := "/root/.kube/config"
valuesFileName := "values.yaml"
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--values", valuesFileName, "--kubeconfig", kubeconfig, "--wait",
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChartWithValuesFile(tt.ctx, chart, url, version, kubeconfig, valuesFileName)).To(Succeed())
}
func TestHelmInstallChartWithValuesFileSuccessWithInsecure(t *testing.T) {
tt := newHelmTest(t, executables.WithInsecure())
chart := "chart"
url := "url"
version := "1.1"
kubeconfig := "/root/.kube/config"
valuesFileName := "values.yaml"
expectCommand(
tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--values", valuesFileName, "--kubeconfig", kubeconfig, "--wait", "--insecure-skip-tls-verify",
).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.InstallChartWithValuesFile(tt.ctx, chart, url, version, kubeconfig, valuesFileName)).To(Succeed())
}
func TestHelmListCharts(t *testing.T) {
tt := newHelmTest(t, executables.WithInsecure())
kubeconfig := "/root/.kube/config"
t.Run("Normal functionality", func(t *testing.T) {
output := []byte("eks-anywhere-packages\n")
expected := []string{"eks-anywhere-packages"}
expectCommand(tt.e, tt.ctx, "list", "-q", "--kubeconfig", kubeconfig).withEnvVars(tt.envVars).to().Return(*bytes.NewBuffer(output), nil)
tt.Expect(tt.h.ListCharts(tt.ctx, kubeconfig)).To(Equal(expected))
})
t.Run("Empty output", func(t *testing.T) {
expected := []string{}
expectCommand(tt.e, tt.ctx, "list", "-q", "--kubeconfig", kubeconfig).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
tt.Expect(tt.h.ListCharts(tt.ctx, kubeconfig)).To(Equal(expected))
})
t.Run("Errored out", func(t *testing.T) {
output := errors.New("Error")
var expected []string
expectCommand(tt.e, tt.ctx, "list", "-q", "--kubeconfig", kubeconfig).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, output)
result, err := tt.h.ListCharts(tt.ctx, kubeconfig)
tt.Expect(err).To(HaveOccurred())
tt.Expect(result).To(Equal(expected))
})
}
func TestHelmDelete(s *testing.T) {
kubeconfig := "/root/.kube/config"
s.Run("Success", func(t *testing.T) {
tt := newHelmTest(t)
installName := "test-install"
expectCommand(tt.e, tt.ctx, "delete", installName, "--kubeconfig", kubeconfig).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
err := tt.h.Delete(tt.ctx, kubeconfig, installName, "")
tt.Expect(err).NotTo(HaveOccurred())
})
s.Run("passes the namespace, if present", func(t *testing.T) {
tt := newHelmTest(t)
testNamespace := "testing"
installName := "test-install"
expectCommand(tt.e, tt.ctx, "delete", installName, "--kubeconfig", kubeconfig, "--namespace", testNamespace).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
err := tt.h.Delete(tt.ctx, kubeconfig, installName, testNamespace)
tt.Expect(err).NotTo(HaveOccurred())
})
s.Run("passes the insecure skip flag", func(t *testing.T) {
tt := newHelmTest(t, executables.WithInsecure())
installName := "test-install"
expectCommand(tt.e, tt.ctx, "delete", installName, "--kubeconfig", kubeconfig, "--insecure-skip-tls-verify").withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil)
err := tt.h.Delete(tt.ctx, kubeconfig, installName, "")
tt.Expect(err).NotTo(HaveOccurred())
})
s.Run("returns errors from the helm executable", func(t *testing.T) {
tt := newHelmTest(t)
installName := "test-install"
expectCommand(tt.e, tt.ctx, "delete", installName, "--kubeconfig", kubeconfig).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, fmt.Errorf("test error"))
err := tt.h.Delete(tt.ctx, kubeconfig, installName, "")
tt.Expect(err).To(HaveOccurred())
})
}
| 332 |
eks-anywhere | aws | Go | package executables_test
import (
"context"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/executables/mocks"
)
type commandExpect struct {
command *executables.Command
e *mocks.MockExecutable
}
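// expectCommand registers an expectation on the mock Executable for a command
// with the given args and returns a fluent builder that mirrors the command's
// own chaining, e.g.
//
//	expectCommand(e, ctx, "pull", url).withEnvVars(env).to().Return(bytes.Buffer{}, nil)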
func expectCommand(e *mocks.MockExecutable, ctx context.Context, args ...string) *commandExpect {
e.EXPECT().Command(ctx, args).Return(executables.NewCommand(ctx, e, args...))
return &commandExpect{
command: executables.NewCommand(ctx, e, args...),
e: e,
}
}
func (c *commandExpect) withEnvVars(envVars map[string]string) *commandExpect {
c.command.WithEnvVars(envVars)
return c
}
func (c *commandExpect) withStdIn(stdIn []byte) *commandExpect {
c.command.WithStdIn(stdIn)
return c
}
func (c *commandExpect) to() *gomock.Call {
return c.e.EXPECT().Run(c.command)
}
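// sliceEqual reports whether a and b contain the same elements, regardless of order.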
func sliceEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[v]++
}
for _, v := range b {
if _, ok := m[v]; !ok {
return false
}
m[v] -= 1
if m[v] == 0 {
delete(m, v)
}
}
return len(m) == 0
}
| 59 |
eks-anywhere | aws | Go | package executables
import (
"bufio"
"context"
_ "embed"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
)
const kindPath = "kind"
//go:embed config/kind.yaml
var kindConfigTemplate string
const configFileName = "kind_tmp.yaml"
type Kind struct {
writer filewriter.FileWriter
Executable
execConfig *kindExecConfig
}
// kindExecConfig contains transient information for the execution of kind commands.
// It's used by BootstrapClusterClientOptions to store or change information prior to a command execution.
// It must be cleaned after each execution to prevent side effects from past executions' options.
type kindExecConfig struct {
env map[string]string
ConfigFile string
KindImage string
KubernetesRepository string
EtcdRepository string
EtcdVersion string
CorednsRepository string
CorednsVersion string
KubernetesVersion string
RegistryMirrorMap map[string]string
MirrorBase string
RegistryCACertPath string
RegistryAuth bool
RegistryUsername string
RegistryPassword string
ExtraPortMappings []int
DockerExtraMounts bool
DisableDefaultCNI bool
}
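// NewKind returns a Kind executable wrapper that writes its generated config and kubeconfig files through the given writer.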
func NewKind(executable Executable, writer filewriter.FileWriter) *Kind {
return &Kind{
writer: writer,
Executable: executable,
}
}
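// CreateBootstrapCluster creates a kind cluster from the cluster spec, rendering a kind config file
// from the embedded template, and returns the path to the generated kubeconfig file.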
func (k *Kind) CreateBootstrapCluster(ctx context.Context, clusterSpec *cluster.Spec, opts ...bootstrapper.BootstrapClusterClientOption) (kubeconfig string, err error) {
err = k.setupExecConfig(clusterSpec)
if err != nil {
return "", err
}
defer k.cleanExecConfig()
err = processOpts(opts)
if err != nil {
return "", err
}
err = k.buildConfigFile()
if err != nil {
return "", err
}
kubeconfigName, err := k.createKubeConfig(clusterSpec.Cluster.Name, []byte(""))
if err != nil {
return "", err
}
executionArgs := k.execArguments(clusterSpec.Cluster.Name, kubeconfigName)
logger.V(4).Info("Creating kind cluster", "name", getInternalName(clusterSpec.Cluster.Name), "kubeconfig", kubeconfigName)
_, err = k.ExecuteWithEnv(ctx, k.execConfig.env, executionArgs...)
if err != nil {
return "", fmt.Errorf("executing create cluster: %v", err)
}
return kubeconfigName, nil
}
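// ClusterExists reports whether a kind cluster with the internal name derived from clusterName already exists.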
func (k *Kind) ClusterExists(ctx context.Context, clusterName string) (bool, error) {
internalName := getInternalName(clusterName)
stdOut, err := k.Execute(ctx, "get", "clusters")
if err != nil {
return false, fmt.Errorf("executing get clusters: %v", err)
}
logger.V(5).Info("Executed kind get clusters", "response", stdOut.String())
scanner := bufio.NewScanner(&stdOut)
for scanner.Scan() {
if kindClusterName := scanner.Text(); kindClusterName == internalName {
return true, nil
}
}
if err := scanner.Err(); err != nil {
return false, fmt.Errorf("failed checking if cluster exists when reading kind get cluster response: %v", err)
}
return false, nil
}
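// GetKubeconfig retrieves the kubeconfig for the given cluster from kind, writes it to a local file, and returns the file path.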
func (k *Kind) GetKubeconfig(ctx context.Context, clusterName string) (string, error) {
internalName := getInternalName(clusterName)
stdOut, err := k.Execute(ctx, "get", "kubeconfig", "--name", internalName)
if err != nil {
return "", fmt.Errorf("executing get kubeconfig: %v", err)
}
return k.createKubeConfig(clusterName, stdOut.Bytes())
}
func (k *Kind) WithExtraDockerMounts() bootstrapper.BootstrapClusterClientOption {
return func() error {
if k.execConfig == nil {
return errors.New("kind exec config is not ready")
}
k.execConfig.DockerExtraMounts = true
return nil
}
}
func (k *Kind) WithExtraPortMappings(ports []int) bootstrapper.BootstrapClusterClientOption {
return func() error {
if k.execConfig == nil {
return errors.New("kind exec config is not ready")
}
if len(ports) == 0 {
return errors.New("no ports found in the list")
}
k.execConfig.ExtraPortMappings = ports
return nil
}
}
func (k *Kind) WithEnv(env map[string]string) bootstrapper.BootstrapClusterClientOption {
return func() error {
if k.execConfig == nil {
return errors.New("kind exec config is not ready")
}
for name, value := range env {
k.execConfig.env[name] = value
}
return nil
}
}
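// DeleteBootstrapCluster deletes the kind cluster backing the given bootstrap cluster.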
func (k *Kind) DeleteBootstrapCluster(ctx context.Context, cluster *types.Cluster) error {
internalName := getInternalName(cluster.Name)
logger.V(4).Info("Deleting kind cluster", "name", internalName)
_, err := k.Execute(ctx, "delete", "cluster", "--name", internalName)
if err != nil {
return fmt.Errorf("executing delete cluster: %v", err)
}
return err
}
func (k *Kind) setupExecConfig(clusterSpec *cluster.Spec) error {
bundle := clusterSpec.VersionsBundle
registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
k.execConfig = &kindExecConfig{
KindImage: registryMirror.ReplaceRegistry(bundle.EksD.KindNode.VersionedImage()),
KubernetesRepository: registryMirror.ReplaceRegistry(bundle.KubeDistro.Kubernetes.Repository),
KubernetesVersion: bundle.KubeDistro.Kubernetes.Tag,
EtcdRepository: registryMirror.ReplaceRegistry(bundle.KubeDistro.Etcd.Repository),
EtcdVersion: bundle.KubeDistro.Etcd.Tag,
CorednsRepository: registryMirror.ReplaceRegistry(bundle.KubeDistro.CoreDNS.Repository),
CorednsVersion: bundle.KubeDistro.CoreDNS.Tag,
env: make(map[string]string),
}
if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
k.execConfig.MirrorBase = registryMirror.BaseRegistry
k.execConfig.RegistryMirrorMap = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
if registryMirror.CACertContent != "" {
path := filepath.Join(clusterSpec.Cluster.Name, "generated", "certs.d", registryMirror.BaseRegistry)
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return err
}
if err := os.WriteFile(filepath.Join(path, "ca.crt"), []byte(registryMirror.CACertContent), 0o644); err != nil {
return errors.New("error writing the registry certification file")
}
k.execConfig.RegistryCACertPath = filepath.Join(clusterSpec.Cluster.Name, "generated", "certs.d")
}
if registryMirror.Auth {
k.execConfig.RegistryAuth = registryMirror.Auth
username, password, err := config.ReadCredentials()
if err != nil {
return err
}
k.execConfig.RegistryUsername = username
k.execConfig.RegistryPassword = password
}
}
return nil
}
func (k *Kind) cleanExecConfig() {
k.execConfig = nil
}
func (k *Kind) buildConfigFile() error {
t := templater.New(k.writer)
writtenFileName, err := t.WriteToFile(kindConfigTemplate, k.execConfig, configFileName)
if err != nil {
return fmt.Errorf("creating file for kind config: %v", err)
}
k.execConfig.ConfigFile = writtenFileName
return nil
}
func (k *Kind) execArguments(clusterName string, kubeconfigName string) []string {
return []string{
"create", "cluster",
"--name", getInternalName(clusterName),
"--kubeconfig", kubeconfigName,
"--image", k.execConfig.KindImage,
"--config", k.execConfig.ConfigFile,
}
}
func (k *Kind) createKubeConfig(clusterName string, content []byte) (string, error) {
fileName, err := k.writer.Write(fmt.Sprintf("%s.kind.kubeconfig", clusterName), content)
if err != nil {
return "", fmt.Errorf("generating temp file for storing kind kubeconfig: %v", err)
}
return fileName, nil
}
func processOpts(opts []bootstrapper.BootstrapClusterClientOption) error {
for _, opt := range opts {
err := opt()
if err != nil {
return err
}
}
return nil
}
func getInternalName(clusterName string) string {
return fmt.Sprintf("%s-eks-a-cluster", clusterName)
}
| 269 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"fmt"
"net"
"os"
"strings"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/types"
)
type testKindOption func(k *executables.Kind) bootstrapper.BootstrapClusterClientOption
func TestKindCreateBootstrapClusterSuccess(t *testing.T) {
_, writer := test.NewWriter(t)
clusterName := "test_cluster"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.VersionsBundle = versionBundle
})
eksClusterName := "test_cluster-eks-a-cluster"
kubeConfigFile := "test_cluster.kind.kubeconfig"
kindImage := "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.2"
// Initialize gomock
mockCtrl := gomock.NewController(t)
tests := []struct {
name string
wantKubeconfig string
env map[string]string
options []testKindOption
wantKindConfig string
}{
{
name: "No options",
wantKubeconfig: kubeConfigFile,
options: nil,
env: map[string]string{},
wantKindConfig: "testdata/kind_config.yaml",
},
{
name: "With env option",
wantKubeconfig: kubeConfigFile,
options: []testKindOption{
func(k *executables.Kind) bootstrapper.BootstrapClusterClientOption {
return k.WithEnv(map[string]string{"ENV_VAR1": "VALUE1", "ENV_VAR2": "VALUE2"})
},
},
env: map[string]string{"ENV_VAR1": "VALUE1", "ENV_VAR2": "VALUE2"},
wantKindConfig: "testdata/kind_config.yaml",
},
{
name: "With docker option",
wantKubeconfig: kubeConfigFile,
options: []testKindOption{
func(k *executables.Kind) bootstrapper.BootstrapClusterClientOption {
return k.WithExtraDockerMounts()
},
},
env: map[string]string{},
wantKindConfig: "testdata/kind_config_docker_mount_networking.yaml",
},
{
name: "With extra port mappings option",
wantKubeconfig: kubeConfigFile,
options: []testKindOption{
func(k *executables.Kind) bootstrapper.BootstrapClusterClientOption {
return k.WithExtraPortMappings([]int{80, 443})
},
},
env: map[string]string{},
wantKindConfig: "testdata/kind_config_extra_port_mappings.yaml",
},
{
name: "With docker option and env option",
wantKubeconfig: kubeConfigFile,
options: []testKindOption{
func(k *executables.Kind) bootstrapper.BootstrapClusterClientOption {
return k.WithEnv(map[string]string{"ENV_VAR1": "VALUE1", "ENV_VAR2": "VALUE2"})
},
func(k *executables.Kind) bootstrapper.BootstrapClusterClientOption {
return k.WithExtraDockerMounts()
},
},
env: map[string]string{"ENV_VAR1": "VALUE1", "ENV_VAR2": "VALUE2"},
wantKindConfig: "testdata/kind_config_docker_mount_networking.yaml",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
spec := clusterSpec
image := kindImage
executable.EXPECT().ExecuteWithEnv(
ctx,
tt.env,
"create", "cluster", "--name", eksClusterName, "--kubeconfig", test.OfType("string"), "--image", image, "--config", test.OfType("string"),
).Return(bytes.Buffer{}, nil).Times(1).Do(
func(ctx context.Context, envs map[string]string, args ...string) (stdout bytes.Buffer, err error) {
gotKindConfig := args[9]
test.AssertFilesEquals(t, gotKindConfig, tt.wantKindConfig)
return bytes.Buffer{}, nil
},
)
k := executables.NewKind(executable, writer)
gotKubeconfig, err := k.CreateBootstrapCluster(ctx, spec, testOptionsToBootstrapOptions(k, tt.options)...)
if err != nil {
t.Fatalf("CreateBootstrapCluster() error = %v, wantErr %v", err, nil)
}
if !strings.HasSuffix(gotKubeconfig, tt.wantKubeconfig) {
t.Errorf("CreateBootstrapCluster() gotKubeconfig = %v, want to end with %v", gotKubeconfig, tt.wantKubeconfig)
}
})
}
}
func TestKindCreateBootstrapClusterSuccessWithRegistryMirror(t *testing.T) {
_, writer := test.NewWriter(t)
clusterName := "test_cluster"
eksClusterName := "test_cluster-eks-a-cluster"
kubeConfigFile := "test_cluster.kind.kubeconfig"
registryMirror := "registry-mirror.test"
registryMirrorWithPort := net.JoinHostPort(registryMirror, constants.DefaultHttpsPort)
// Initialize gomock
mockCtrl := gomock.NewController(t)
tests := []struct {
name string
wantKubeconfig string
env map[string]string
clusterSpec *cluster.Spec
options []testKindOption
wantKindConfig string
}{
{
name: "With registry mirror option, no CA cert provided",
wantKubeconfig: kubeConfigFile,
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.VersionsBundle = versionBundle
s.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{
Endpoint: registryMirror,
Port: constants.DefaultHttpsPort,
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
}
}),
env: map[string]string{},
wantKindConfig: "testdata/kind_config_registry_mirror_insecure.yaml",
},
{
name: "With registry mirror option, with CA cert",
wantKubeconfig: kubeConfigFile,
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.VersionsBundle = versionBundle
s.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{
Endpoint: registryMirror,
Port: constants.DefaultHttpsPort,
CACertContent: "test",
}
}),
env: map[string]string{},
wantKindConfig: "testdata/kind_config_registry_mirror_with_ca.yaml",
},
{
name: "With registry mirror option, with auth",
wantKubeconfig: kubeConfigFile,
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.VersionsBundle = versionBundle
s.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{
Endpoint: registryMirror,
Port: constants.DefaultHttpsPort,
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
{
Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com",
Namespace: "curated-packages",
},
},
Authenticate: true,
}
}),
env: map[string]string{},
wantKindConfig: "testdata/kind_config_registry_mirror_with_auth.yaml",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
executable := mockexecutables.NewMockExecutable(mockCtrl)
spec := tt.clusterSpec
registry := registryMirrorWithPort
if r, ok := registrymirror.FromCluster(spec.Cluster).NamespacedRegistryMap[constants.DefaultCoreEKSARegistry]; ok {
registry = r
}
image := fmt.Sprintf("%s/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.2", registry)
if spec.Cluster.Spec.RegistryMirrorConfiguration.Authenticate {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
executable.EXPECT().ExecuteWithEnv(
ctx,
tt.env,
"create", "cluster", "--name", eksClusterName, "--kubeconfig", test.OfType("string"), "--image", image, "--config", test.OfType("string"),
).Return(bytes.Buffer{}, nil).Times(1).Do(
func(ctx context.Context, envs map[string]string, args ...string) (stdout bytes.Buffer, err error) {
gotKindConfig := args[9]
test.AssertFilesEquals(t, gotKindConfig, tt.wantKindConfig)
return bytes.Buffer{}, nil
},
)
k := executables.NewKind(executable, writer)
gotKubeconfig, err := k.CreateBootstrapCluster(ctx, spec, testOptionsToBootstrapOptions(k, tt.options)...)
if err != nil {
t.Fatalf("CreateBootstrapCluster() error = %v, wantErr %v", err, nil)
}
if !strings.HasSuffix(gotKubeconfig, tt.wantKubeconfig) {
t.Errorf("CreateBootstrapCluster() gotKubeconfig = %v, want to end with %v", gotKubeconfig, tt.wantKubeconfig)
}
})
}
}
func TestKindCreateBootstrapClusterExecutableError(t *testing.T) {
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = "clusterName"
s.VersionsBundle = versionBundle
})
ctx := context.Background()
_, writer := test.NewWriter(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().ExecuteWithEnv(ctx, map[string]string{}, gomock.Any()).Return(bytes.Buffer{}, errors.New("error from execute with env"))
k := executables.NewKind(executable, writer)
gotKubeconfig, err := k.CreateBootstrapCluster(ctx, clusterSpec)
if err == nil {
t.Fatal("Kind.CreateBootstrapCluster() error = nil")
}
if gotKubeconfig != "" {
t.Errorf("CreateBootstrapCluster() gotKubeconfig = %v, want empty string", gotKubeconfig)
}
}
func TestKindCreateBootstrapClusterExecutableWithRegistryMirrorError(t *testing.T) {
registryMirror := "registry-mirror.test"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = "clusterName"
s.VersionsBundle = versionBundle
s.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{
Endpoint: registryMirror,
Port: constants.DefaultHttpsPort,
Authenticate: true,
}
})
if err := os.Unsetenv("REGISTRY_USERNAME"); err != nil {
t.Fatalf(err.Error())
}
if err := os.Unsetenv("REGISTRY_PASSWORD"); err != nil {
t.Fatalf(err.Error())
}
ctx := context.Background()
_, writer := test.NewWriter(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
k := executables.NewKind(executable, writer)
gotKubeconfig, err := k.CreateBootstrapCluster(ctx, clusterSpec)
if err == nil {
t.Fatal("Kind.CreateBootstrapCluster() error = nil")
}
if gotKubeconfig != "" {
t.Errorf("CreateBootstrapCluster() gotKubeconfig = %v, want empty string", gotKubeconfig)
}
}
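// testOptionsToBootstrapOptions converts the test's kind options into the
// bootstrapper client options expected by CreateBootstrapCluster.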
func testOptionsToBootstrapOptions(k *executables.Kind, testOpts []testKindOption) []bootstrapper.BootstrapClusterClientOption {
opts := make([]bootstrapper.BootstrapClusterClientOption, 0, len(testOpts))
for _, opt := range testOpts {
opts = append(opts, opt(k))
}
return opts
}
func TestKindDeleteBootstrapClusterSuccess(t *testing.T) {
cluster := &types.Cluster{
Name: "clusterName",
}
ctx := context.Background()
_, writer := test.NewWriter(t)
internalName := fmt.Sprintf("%s-eks-a-cluster", cluster.Name)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
expectedParam := []string{"delete", "cluster", "--name", internalName}
executable.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
k := executables.NewKind(executable, writer)
if err := k.DeleteBootstrapCluster(ctx, cluster); err != nil {
t.Fatalf("Kind.DeleteBootstrapCluster() error = %v, want nil", err)
}
}
func TestKindDeleteBootstrapClusterExecutableError(t *testing.T) {
cluster := &types.Cluster{
Name: "clusterName",
}
ctx := context.Background()
_, writer := test.NewWriter(t)
internalName := fmt.Sprintf("%s-eks-a-cluster", cluster.Name)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
expectedParam := []string{"delete", "cluster", "--name", internalName}
executable.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
k := executables.NewKind(executable, writer)
if err := k.DeleteBootstrapCluster(ctx, cluster); err == nil {
t.Fatalf("Kind.DeleteBootstrapCluster() error = %v, want not nil", err)
}
}
func TestKindClusterExists(t *testing.T) {
tests := []struct {
testName string
clusterName string
internalName string
kindResponse string
}{
{
testName: "one cluster",
clusterName: "cluster-name",
internalName: "cluster-name-eks-a-cluster",
kindResponse: "cluster-name-eks-a-cluster",
},
{
testName: "3 clusters",
clusterName: "cluster-name-2",
internalName: "cluster-name-2-eks-a-cluster",
kindResponse: "cluster-name-eks-a-cluster\ncluster-name-eks-a-cluster-6\ncluster-name-2-eks-a-cluster",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
ctx := context.Background()
_, writer := test.NewWriter(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "get", "clusters").Return(*bytes.NewBufferString(tt.kindResponse), nil)
k := executables.NewKind(executable, writer)
clusterExists, err := k.ClusterExists(ctx, tt.clusterName)
if err != nil {
t.Fatalf("Kind.ClusterExists() error = %v, wantErr nil", err)
}
if !clusterExists {
t.Fatal("ClusterExists() clusterExists = false, want true")
}
})
}
}
func TestKindGetKubeconfig(t *testing.T) {
clusterName := "cluster-name"
ctx := context.Background()
_, writer := test.NewWriter(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
executable.EXPECT().Execute(ctx, "get", "kubeconfig", "--name", fmt.Sprintf("%s-eks-a-cluster", clusterName)).Return(bytes.Buffer{}, nil)
k := executables.NewKind(executable, writer)
_, err := k.GetKubeconfig(ctx, clusterName)
if err != nil {
t.Fatalf("Kind.GetKubeconfig() error = %v, wantErr nil", err)
}
}
package executables
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"math"
"regexp"
"sort"
"strconv"
"strings"
"time"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/pkg/errors"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta1"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
kubectlPath = "kubectl"
timeoutPrecision = 2
minimumWaitTimeout = 0.01 // Smallest express-able timeout value given the precision
networkFaultBaseRetryTime = 10 * time.Second
networkFaultBackoffFactor = 1.5
lastAppliedAnnotation = "kubectl.kubernetes.io/last-applied-configuration"
)
var (
capiClustersResourceType = fmt.Sprintf("clusters.%s", clusterv1.GroupVersion.Group)
capiProvidersResourceType = fmt.Sprintf("providers.clusterctl.%s", clusterv1.GroupVersion.Group)
capiMachinesType = fmt.Sprintf("machines.%s", clusterv1.GroupVersion.Group)
capiMachineDeploymentsType = fmt.Sprintf("machinedeployments.%s", clusterv1.GroupVersion.Group)
capiMachineSetsType = fmt.Sprintf("machinesets.%s", clusterv1.GroupVersion.Group)
eksaClusterResourceType = fmt.Sprintf("clusters.%s", v1alpha1.GroupVersion.Group)
eksaVSphereDatacenterResourceType = fmt.Sprintf("vspheredatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaVSphereMachineResourceType = fmt.Sprintf("vspheremachineconfigs.%s", v1alpha1.GroupVersion.Group)
vsphereMachineTemplatesType = fmt.Sprintf("vspheremachinetemplates.infrastructure.%s", clusterv1.GroupVersion.Group)
eksaTinkerbellDatacenterResourceType = fmt.Sprintf("tinkerbelldatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaTinkerbellMachineResourceType = fmt.Sprintf("tinkerbellmachineconfigs.%s", v1alpha1.GroupVersion.Group)
TinkerbellHardwareResourceType = fmt.Sprintf("hardware.%s", tinkv1alpha1.GroupVersion.Group)
rufioMachineResourceType = fmt.Sprintf("machines.%s", rufiov1alpha1.GroupVersion.Group)
eksaCloudStackDatacenterResourceType = fmt.Sprintf("cloudstackdatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaCloudStackMachineResourceType = fmt.Sprintf("cloudstackmachineconfigs.%s", v1alpha1.GroupVersion.Group)
cloudstackMachineTemplatesType = fmt.Sprintf("cloudstackmachinetemplates.infrastructure.%s", clusterv1.GroupVersion.Group)
eksaNutanixDatacenterResourceType = fmt.Sprintf("nutanixdatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaNutanixMachineResourceType = fmt.Sprintf("nutanixmachineconfigs.%s", v1alpha1.GroupVersion.Group)
eksaAwsResourceType = fmt.Sprintf("awsdatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaGitOpsResourceType = fmt.Sprintf("gitopsconfigs.%s", v1alpha1.GroupVersion.Group)
eksaFluxConfigResourceType = fmt.Sprintf("fluxconfigs.%s", v1alpha1.GroupVersion.Group)
eksaOIDCResourceType = fmt.Sprintf("oidcconfigs.%s", v1alpha1.GroupVersion.Group)
eksaAwsIamResourceType = fmt.Sprintf("awsiamconfigs.%s", v1alpha1.GroupVersion.Group)
etcdadmClustersResourceType = fmt.Sprintf("etcdadmclusters.%s", etcdv1.GroupVersion.Group)
bundlesResourceType = fmt.Sprintf("bundles.%s", releasev1alpha1.GroupVersion.Group)
clusterResourceSetResourceType = fmt.Sprintf("clusterresourcesets.%s", addons.GroupVersion.Group)
kubeadmControlPlaneResourceType = fmt.Sprintf("kubeadmcontrolplanes.controlplane.%s", clusterv1.GroupVersion.Group)
eksdReleaseType = fmt.Sprintf("releases.%s", eksdv1alpha1.GroupVersion.Group)
eksaPackagesType = fmt.Sprintf("packages.%s", packagesv1.GroupVersion.Group)
eksaPackagesBundleControllerType = fmt.Sprintf("packagebundlecontroller.%s", packagesv1.GroupVersion.Group)
eksaPackageBundlesType = fmt.Sprintf("packagebundles.%s", packagesv1.GroupVersion.Group)
kubectlConnectionRefusedRegex = regexp.MustCompile("The connection to the server .* was refused")
kubectlIoTimeoutRegex = regexp.MustCompile("Unable to connect to the server.*i/o timeout.*")
)
type Kubectl struct {
Executable
// networkFaultBackoffFactor drives the exponential backoff wait
// for transient network failures during retry operations.
networkFaultBackoffFactor float64
// networkFaultBaseRetryTime drives the base time wait for the
// exponential backoff for transient network failures during retry operations.
networkFaultBaseRetryTime time.Duration
}
// KubectlConfigOpt configures Kubectl on construction.
type KubectlConfigOpt func(*Kubectl)
// NewKubectl builds a new Kubectl.
func NewKubectl(executable Executable, opts ...KubectlConfigOpt) *Kubectl {
k := &Kubectl{
Executable: executable,
networkFaultBackoffFactor: networkFaultBackoffFactor,
networkFaultBaseRetryTime: networkFaultBaseRetryTime,
}
for _, opt := range opts {
opt(k)
}
return k
}
// WithKubectlNetworkFaultBaseRetryTime configures the base time wait for the
// exponential backoff for transient network failures during retry operations.
func WithKubectlNetworkFaultBaseRetryTime(wait time.Duration) KubectlConfigOpt {
return func(k *Kubectl) {
k.networkFaultBaseRetryTime = wait
}
}
// WithNetworkFaultBackoffFactor configures the exponential backoff wait
// for transient network failures during retry operations.
func WithNetworkFaultBackoffFactor(factor float64) KubectlConfigOpt {
return func(k *Kubectl) {
k.networkFaultBackoffFactor = factor
}
}
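// Example (illustrative sketch only): building a Kubectl with a faster retry
// profile, e.g. for tests. The `exec` value is a placeholder for any
// Executable implementation and is not defined here:
//
//	k := NewKubectl(exec,
//		WithKubectlNetworkFaultBaseRetryTime(time.Second),
//		WithNetworkFaultBackoffFactor(2),
//	)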
type capiMachinesResponse struct {
Items []clusterv1.Machine
}
// GetCAPIMachines returns all the CAPI machines for the provided clusterName.
func (k *Kubectl) GetCAPIMachines(ctx context.Context, cluster *types.Cluster, clusterName string) ([]clusterv1.Machine, error) {
params := []string{
"get", capiMachinesType, "-o", "json", "--kubeconfig", cluster.KubeconfigFile,
"--selector=cluster.x-k8s.io/cluster-name=" + clusterName,
"--namespace", constants.EksaSystemNamespace,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting machines: %v", err)
}
response := &capiMachinesResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get machines response: %v", err)
}
return response.Items, nil
}
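// SearchCloudStackMachineConfig returns the CloudStackMachineConfig objects in
// the given namespace whose metadata.name matches name.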
func (k *Kubectl) SearchCloudStackMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.CloudStackMachineConfig, error) {
params := []string{
"get", eksaCloudStackMachineResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa CloudStackMachineConfigResponse: %v", err)
}
response := &CloudStackMachineConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing CloudStackMachineConfigResponse response: %v", err)
}
return response.Items, nil
}
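// SearchCloudStackDatacenterConfig returns the CloudStackDatacenterConfig
// objects in the given namespace whose metadata.name matches name.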
func (k *Kubectl) SearchCloudStackDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.CloudStackDatacenterConfig, error) {
params := []string{
"get", eksaCloudStackDatacenterResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa CloudStackDatacenterConfigResponse: %v", err)
}
response := &CloudStackDatacenterConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing CloudStackDatacenterConfigResponse response: %v", err)
}
return response.Items, nil
}
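// GetEksaCloudStackMachineConfig fetches a single CloudStackMachineConfig by
// name from the given namespace.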
func (k *Kubectl) GetEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackMachineConfig, error) {
response := &v1alpha1.CloudStackMachineConfig{}
err := k.GetObject(ctx, eksaCloudStackMachineResourceType, cloudstackMachineConfigName, namespace, kubeconfigFile, response)
if err != nil {
return nil, fmt.Errorf("getting eksa cloudstack machineconfig: %v", err)
}
return response, nil
}
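// DeleteEksaCloudStackDatacenterConfig deletes the named
// CloudStackDatacenterConfig, ignoring not-found errors.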
func (k *Kubectl) DeleteEksaCloudStackDatacenterConfig(ctx context.Context, cloudstackDatacenterConfigName string, kubeconfigFile string, namespace string) error {
params := []string{"delete", eksaCloudStackDatacenterResourceType, cloudstackDatacenterConfigName, "--kubeconfig", kubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting cloudstackdatacenterconfig cluster %s apply: %v", cloudstackDatacenterConfigName, err)
}
return nil
}
func (k *Kubectl) GetEksaCloudStackDatacenterConfig(ctx context.Context, cloudstackDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackDatacenterConfig, error) {
response := &v1alpha1.CloudStackDatacenterConfig{}
err := k.GetObject(ctx, eksaCloudStackDatacenterResourceType, cloudstackDatacenterConfigName, namespace, kubeconfigFile, response)
if err != nil {
return nil, fmt.Errorf("getting eksa cloudstack datacenterconfig: %v", err)
}
return response, nil
}
func (k *Kubectl) DeleteEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) error {
params := []string{"delete", eksaCloudStackMachineResourceType, cloudstackMachineConfigName, "--kubeconfig", kubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting cloudstackmachineconfig cluster %s apply: %v", cloudstackMachineConfigName, err)
}
return nil
}
type VersionResponse struct {
ClientVersion version.Info `json:"clientVersion"`
ServerVersion version.Info `json:"serverVersion"`
}
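// GetNamespace checks that the given namespace exists, returning the
// underlying kubectl error when it does not.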
func (k *Kubectl) GetNamespace(ctx context.Context, kubeconfig string, namespace string) error {
params := []string{"get", "namespace", namespace, "--kubeconfig", kubeconfig}
_, err := k.Execute(ctx, params...)
return err
}
func (k *Kubectl) CreateNamespace(ctx context.Context, kubeconfig string, namespace string) error {
params := []string{"create", "namespace", namespace, "--kubeconfig", kubeconfig}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("creating namespace %v: %v", namespace, err)
}
return nil
}
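// CreateNamespaceIfNotPresent creates the namespace only when getting it
// fails, making the operation idempotent.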
func (k *Kubectl) CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error {
if err := k.GetNamespace(ctx, kubeconfig, namespace); err != nil {
return k.CreateNamespace(ctx, kubeconfig, namespace)
}
return nil
}
func (k *Kubectl) DeleteNamespace(ctx context.Context, kubeconfig string, namespace string) error {
params := []string{"delete", "namespace", namespace, "--kubeconfig", kubeconfig}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("creating namespace %v: %v", namespace, err)
}
return nil
}
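// LoadSecret creates a generic secret of the given type in the eksa-system
// namespace from a literal key-value string.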
func (k *Kubectl) LoadSecret(ctx context.Context, secretObject string, secretObjectType string, secretObjectName string, kubeConfFile string) error {
params := []string{"create", "secret", "generic", secretObjectName, "--type", secretObjectType, "--from-literal", secretObject, "--kubeconfig", kubeConfFile, "--namespace", constants.EksaSystemNamespace}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("loading secret: %v", err)
}
return nil
}
// ApplyManifest uses client-side logic to create/update objects defined in a yaml manifest.
func (k *Kubectl) ApplyManifest(ctx context.Context, kubeconfigPath, manifestPath string) error {
if _, err := k.Execute(ctx, "apply", "-f", manifestPath, "--kubeconfig", kubeconfigPath); err != nil {
return fmt.Errorf("executing apply manifest: %v", err)
}
return nil
}
func (k *Kubectl) ApplyKubeSpecWithNamespace(ctx context.Context, cluster *types.Cluster, spec string, namespace string) error {
params := []string{"apply", "-f", spec, "--namespace", namespace}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("executing apply: %v", err)
}
return nil
}
func (k *Kubectl) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
params := []string{"apply", "-f", "-"}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := k.ExecuteWithStdin(ctx, data, params...)
if err != nil {
return fmt.Errorf("executing apply: %v", err)
}
return nil
}
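// ApplyKubeSpecFromBytesWithNamespace applies the manifest in data to the
// given namespace via stdin; empty input is skipped with a log message.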
func (k *Kubectl) ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error {
if len(data) == 0 {
logger.V(6).Info("Skipping applying empty kube spec from bytes")
return nil
}
params := []string{"apply", "-f", "-", "--namespace", namespace}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := k.ExecuteWithStdin(ctx, data, params...)
if err != nil {
return fmt.Errorf("executing apply: %v", err)
}
return nil
}
func (k *Kubectl) ApplyKubeSpecFromBytesForce(ctx context.Context, cluster *types.Cluster, data []byte) error {
params := []string{"apply", "-f", "-", "--force"}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := k.ExecuteWithStdin(ctx, data, params...)
if err != nil {
return fmt.Errorf("executing apply --force: %v", err)
}
return nil
}
// DeleteManifest uses client-side logic to delete objects defined in a yaml manifest.
func (k *Kubectl) DeleteManifest(ctx context.Context, kubeconfigPath, manifestPath string, opts ...KubectlOpt) error {
params := []string{
"delete", "-f", manifestPath, "--kubeconfig", kubeconfigPath,
}
applyOpts(¶ms, opts...)
if _, err := k.Execute(ctx, params...); err != nil {
return fmt.Errorf("executing apply manifest: %v", err)
}
return nil
}
func (k *Kubectl) DeleteKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
params := []string{"delete", "-f", "-"}
if cluster.KubeconfigFile != "" {
params = append(params, "--kubeconfig", cluster.KubeconfigFile)
}
_, err := k.ExecuteWithStdin(ctx, data, params...)
if err != nil {
return fmt.Errorf("executing apply: %v", err)
}
return nil
}
func (k *Kubectl) WaitForClusterReady(ctx context.Context, cluster *types.Cluster, timeout string, clusterName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "Ready", fmt.Sprintf("%s/%s", capiClustersResourceType, clusterName), constants.EksaSystemNamespace)
}
func (k *Kubectl) WaitForControlPlaneReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "ControlPlaneReady", fmt.Sprintf("%s/%s", capiClustersResourceType, newClusterName), constants.EksaSystemNamespace)
}
// WaitForControlPlaneAvailable blocks until the first control plane is available.
func (k *Kubectl) WaitForControlPlaneAvailable(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "ControlPlaneInitialized", fmt.Sprintf("%s/%s", capiClustersResourceType, newClusterName), constants.EksaSystemNamespace)
}
func (k *Kubectl) WaitForControlPlaneNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "ControlPlaneReady=false", fmt.Sprintf("%s/%s", capiClustersResourceType, newClusterName), constants.EksaSystemNamespace)
}
func (k *Kubectl) WaitForManagedExternalEtcdReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "ManagedEtcdReady", fmt.Sprintf("clusters.%s/%s", clusterv1.GroupVersion.Group, newClusterName), constants.EksaSystemNamespace)
}
func (k *Kubectl) WaitForManagedExternalEtcdNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "ManagedEtcdReady=false", fmt.Sprintf("clusters.%s/%s", clusterv1.GroupVersion.Group, newClusterName), constants.EksaSystemNamespace)
}
func (k *Kubectl) WaitForMachineDeploymentReady(ctx context.Context, cluster *types.Cluster, timeout string, machineDeploymentName string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, "Ready=true", fmt.Sprintf("%s/%s", capiMachineDeploymentsType, machineDeploymentName), constants.EksaSystemNamespace)
}
// WaitForService blocks until an IP address is assigned.
//
// Until more generic status matching comes around (possibly in 1.23), poll
// the service, checking for an IP address. Would you like to know more?
// https://github.com/kubernetes/kubernetes/issues/83094
func (k *Kubectl) WaitForService(ctx context.Context, kubeconfig string, timeout string, target string, namespace string) error {
timeoutDur, err := time.ParseDuration(timeout)
if err != nil {
return fmt.Errorf("parsing duration %q: %w", timeout, err)
}
timeoutCtx, cancel := context.WithTimeout(ctx, timeoutDur)
defer cancel()
timedOut := timeoutCtx.Done()
const pollInterval = time.Second
ticker := time.NewTicker(pollInterval)
defer ticker.Stop()
svc := &corev1.Service{}
for {
select {
case <-timedOut:
return timeoutCtx.Err()
case <-ticker.C:
err := k.GetObject(ctx, "service", target, namespace, kubeconfig, svc)
if err != nil {
logger.V(6).Info("failed to poll service", "target", target, "namespace", namespace, "error", err)
continue
}
for _, ingress := range svc.Status.LoadBalancer.Ingress {
if ingress.IP != "" {
logger.V(5).Info("found a load balancer:", "IP", svc.Spec.ClusterIP)
return nil
}
}
if svc.Spec.ClusterIP != "" {
logger.V(5).Info("found a ClusterIP:", "IP", svc.Spec.ClusterIP)
return nil
}
}
}
}
func (k *Kubectl) WaitForDeployment(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, condition, "deployments/"+target, namespace)
}
// WaitForResourceRolledout waits for a resource (deployment, daemonset, or statefulset) to be successfully rolled out before returning.
func (k *Kubectl) WaitForResourceRolledout(ctx context.Context, cluster *types.Cluster, timeout string, target string, namespace string, resource string) error {
params := []string{"rollout", "status", resource, target, "--kubeconfig", cluster.KubeconfigFile, "--namespace", namespace, "--timeout", timeout}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("unable to finish %s roll out: %w", resource, err)
}
return nil
}
// WaitForPod waits for a pod resource to reach desired condition before returning.
func (k *Kubectl) WaitForPod(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, condition, "pod/"+target, namespace)
}
// WaitForRufioMachines blocks until all Rufio Machines have the desired condition.
func (k *Kubectl) WaitForRufioMachines(ctx context.Context, cluster *types.Cluster, timeout string, condition string, namespace string) error {
return k.Wait(ctx, cluster.KubeconfigFile, timeout, condition, rufioMachineResourceType, namespace, WithWaitAll())
}
// WaitForJobCompleted waits for a job resource to reach desired condition before returning.
func (k *Kubectl) WaitForJobCompleted(ctx context.Context, kubeconfig, timeout string, condition string, target string, namespace string) error {
return k.Wait(ctx, kubeconfig, timeout, condition, "job/"+target, namespace)
}
// WaitForPackagesInstalled waits for a package resource to reach installed state before returning.
func (k *Kubectl) WaitForPackagesInstalled(ctx context.Context, cluster *types.Cluster, name string, timeout string, namespace string) error {
return k.WaitJSONPathLoop(ctx, cluster.KubeconfigFile, timeout, "status.state", "installed", fmt.Sprintf("%s/%s", eksaPackagesType, name), namespace)
}
// WaitForPodCompleted waits for a pod to be terminated with a Completed state before returning.
func (k *Kubectl) WaitForPodCompleted(ctx context.Context, cluster *types.Cluster, name string, timeout string, namespace string) error {
return k.WaitJSONPathLoop(ctx, cluster.KubeconfigFile, timeout, "status.containerStatuses[0].state.terminated.reason", "Completed", "pod/"+name, namespace)
}
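// Wait executes `kubectl wait` for the given condition, retrying transient
// network faults until the caller-specified timeout elapses. The timeout is an
// absolute deadline: each retry only waits for the time remaining, so a "10m"
// timeout never stretches past ten minutes in total.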
func (k *Kubectl) Wait(ctx context.Context, kubeconfig string, timeout string, forCondition string, property string, namespace string, opts ...KubectlOpt) error {
// On each retry kubectl wait timeout values will have to be adjusted to only wait for the remaining timeout duration.
// Here we establish an absolute timeout time for this based on the caller-specified timeout.
timeoutDuration, err := time.ParseDuration(timeout)
if err != nil {
return fmt.Errorf("unparsable timeout specified: %w", err)
}
if timeoutDuration < 0 {
return fmt.Errorf("negative timeout specified: %w", err)
}
timeoutTime := time.Now().Add(timeoutDuration)
retrier := retrier.New(timeoutDuration, retrier.WithRetryPolicy(k.kubectlWaitRetryPolicy))
err = retrier.Retry(
func() error {
return k.wait(ctx, kubeconfig, timeoutTime, forCondition, property, namespace, opts...)
},
)
if err != nil {
return fmt.Errorf("executing wait: %w", err)
}
return nil
}
// WaitJSONPathLoop will wait for a given JSONPath to reach a required state similar to wait command for objects without conditions.
// This will be deprecated in favor of WaitJSONPath after version 1.23.
func (k *Kubectl) WaitJSONPathLoop(ctx context.Context, kubeconfig string, timeout string, jsonpath, forCondition string, property string, namespace string, opts ...KubectlOpt) error {
// On each retry kubectl wait timeout values will have to be adjusted to only wait for the remaining timeout duration.
// Here we establish an absolute timeout time for this based on the caller-specified timeout.
timeoutDuration, err := time.ParseDuration(timeout)
if err != nil {
return fmt.Errorf("unparsable timeout specified: %w", err)
}
if timeoutDuration < 0 {
return fmt.Errorf("negative timeout specified: %w", err)
}
retrier := retrier.New(timeoutDuration, retrier.WithRetryPolicy(k.kubectlWaitRetryPolicy))
err = retrier.Retry(
func() error {
return k.waitJSONPathLoop(ctx, kubeconfig, timeout, jsonpath, forCondition, property, namespace, opts...)
},
)
if err != nil {
return fmt.Errorf("executing wait: %w", err)
}
return nil
}
// WaitJSONPath will wait for a given JSONPath of a required state. Only compatible on K8s 1.23+.
func (k *Kubectl) WaitJSONPath(ctx context.Context, kubeconfig string, timeout string, jsonpath, forCondition string, property string, namespace string, opts ...KubectlOpt) error {
// On each retry kubectl wait timeout values will have to be adjusted to only wait for the remaining timeout duration.
// Here we establish an absolute timeout time for this based on the caller-specified timeout.
timeoutDuration, err := time.ParseDuration(timeout)
if err != nil {
return fmt.Errorf("unparsable timeout specified: %w", err)
}
if timeoutDuration < 0 {
return fmt.Errorf("negative timeout specified: %w", err)
}
retrier := retrier.New(timeoutDuration, retrier.WithRetryPolicy(k.kubectlWaitRetryPolicy))
err = retrier.Retry(
func() error {
return k.waitJSONPath(ctx, kubeconfig, timeout, jsonpath, forCondition, property, namespace, opts...)
},
)
if err != nil {
return fmt.Errorf("executing wait: %w", err)
}
return nil
}
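// kubectlWaitRetryPolicy retries only transient network faults (connection
// refused or i/o timeout), with exponential backoff: using the default base of
// 10s and factor of 1.5, retries wait 10s, 15s, 22.5s, ... following
// base * factor^(n-1) for retry n.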
func (k *Kubectl) kubectlWaitRetryPolicy(totalRetries int, err error) (retry bool, wait time.Duration) {
// Exponential backoff on network errors. Retrier built-in backoff is linear, so implementing here.
// Retrier first calls the policy before retry #1. We want it zero-based for exponentiation.
if totalRetries < 1 {
totalRetries = 1
}
waitTime := time.Duration(float64(k.networkFaultBaseRetryTime) * math.Pow(k.networkFaultBackoffFactor, float64(totalRetries-1)))
if match := kubectlConnectionRefusedRegex.MatchString(err.Error()); match {
return true, waitTime
}
if match := kubectlIoTimeoutRegex.MatchString(err.Error()); match {
return true, waitTime
}
return false, 0
}
func (k *Kubectl) wait(ctx context.Context, kubeconfig string, timeoutTime time.Time, forCondition string, property string, namespace string, opts ...KubectlOpt) error {
secondsRemainingUntilTimeout := time.Until(timeoutTime).Seconds()
if secondsRemainingUntilTimeout <= minimumWaitTimeout {
return fmt.Errorf("error: timed out waiting for condition %v on %v", forCondition, property)
}
kubectlTimeoutString := fmt.Sprintf("%.*fs", timeoutPrecision, secondsRemainingUntilTimeout)
params := []string{
"wait", "--timeout", kubectlTimeoutString,
"--for=condition=" + forCondition, property, "--kubeconfig", kubeconfig, "-n", namespace,
}
applyOpts(¶ms, opts...)
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("executing wait: %w", err)
}
return nil
}
func (k *Kubectl) waitJSONPath(ctx context.Context, kubeconfig, timeout string, jsonpath string, forCondition string, property string, namespace string, opts ...KubectlOpt) error {
if jsonpath == "" || forCondition == "" {
return fmt.Errorf("empty conditions params passed to waitJSONPath()")
}
params := []string{
"wait", "--timeout", timeout, fmt.Sprintf("--for=jsonpath='{.%s}'=%s", jsonpath, forCondition), property, "--kubeconfig", kubeconfig, "-n", namespace,
}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("executing wait: %w", err)
}
return nil
}
// waitJsonPathLoop will be deprecated in favor of waitJsonPath after version 1.23.
func (k *Kubectl) waitJSONPathLoop(ctx context.Context, kubeconfig string, timeout string, jsonpath string, forCondition string, property string, namespace string, opts ...KubectlOpt) error {
if jsonpath == "" || forCondition == "" {
return fmt.Errorf("empty conditions params passed to waitJSONPathLoop()")
}
timeoutDur, err := time.ParseDuration(timeout)
if err != nil {
return fmt.Errorf("parsing duration %q: %w", timeout, err)
}
timeoutCtx, cancel := context.WithTimeout(ctx, timeoutDur)
defer cancel()
timedOut := timeoutCtx.Done()
const pollInterval = time.Second * 5
ticker := time.NewTicker(pollInterval)
defer ticker.Stop()
for {
select {
case <-timedOut:
return fmt.Errorf("waiting for %s %s on %s: timed out", jsonpath, forCondition, property)
case <-ticker.C:
params := []string{
"get", property,
"-o", fmt.Sprintf("jsonpath='{.%s}'", jsonpath),
"--kubeconfig", kubeconfig,
"-n", namespace,
}
stdout, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("waiting for %s %s on %s: %w", jsonpath, forCondition, property, err)
}
if strings.Contains(stdout.String(), forCondition) {
return nil
}
fmt.Printf("waiting 5 seconds.... current state=%v, desired state=%v\n", stdout.String(), fmt.Sprintf("'%s'", forCondition))
}
}
}
func (k *Kubectl) DeleteEksaDatacenterConfig(ctx context.Context, eksaDatacenterResourceType string, eksaDatacenterConfigName string, kubeconfigFile string, namespace string) error {
params := []string{"delete", eksaDatacenterResourceType, eksaDatacenterConfigName, "--kubeconfig", kubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting %s cluster %s apply: %v", eksaDatacenterResourceType, eksaDatacenterConfigName, err)
}
return nil
}
func (k *Kubectl) DeleteEksaMachineConfig(ctx context.Context, eksaMachineConfigResourceType string, eksaMachineConfigName string, kubeconfigFile string, namespace string) error {
params := []string{"delete", eksaMachineConfigResourceType, eksaMachineConfigName, "--kubeconfig", kubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting %s cluster %s apply: %v", eksaMachineConfigResourceType, eksaMachineConfigName, err)
}
return nil
}
func (k *Kubectl) DeleteEKSACluster(ctx context.Context, managementCluster *types.Cluster, eksaClusterName, eksaClusterNamespace string) error {
params := []string{"delete", eksaClusterResourceType, eksaClusterName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", eksaClusterNamespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting eksa cluster %s apply: %v", eksaClusterName, err)
}
return nil
}
func (k *Kubectl) DeleteGitOpsConfig(ctx context.Context, managementCluster *types.Cluster, gitOpsConfigName, gitOpsConfigNamespace string) error {
params := []string{"delete", eksaGitOpsResourceType, gitOpsConfigName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", gitOpsConfigNamespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting gitops config %s apply: %v", gitOpsConfigName, err)
}
return nil
}
func (k *Kubectl) DeleteFluxConfig(ctx context.Context, managementCluster *types.Cluster, fluxConfigName, fluxConfigNamespace string) error {
params := []string{"delete", eksaFluxConfigResourceType, fluxConfigName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", fluxConfigNamespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting gitops config %s apply: %v", fluxConfigName, err)
}
return nil
}
// GetPackageBundleController will retrieve the packagebundlecontroller from eksa-packages namespace and return the object.
func (k *Kubectl) GetPackageBundleController(ctx context.Context, kubeconfigFile, clusterName string) (packagesv1.PackageBundleController, error) {
params := []string{"get", eksaPackagesBundleControllerType, clusterName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", constants.EksaPackagesName, "--ignore-not-found=true"}
stdOut, _ := k.Execute(ctx, params...)
response := &packagesv1.PackageBundleController{}
err := json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return packagesv1.PackageBundleController{}, fmt.Errorf("unmarshalling kubectl response to GO struct %s: %v", clusterName, err)
}
return *response, nil
}
// GetPackageBundleList will retrieve the packagebundle list from eksa-packages namespace and return the list.
func (k *Kubectl) GetPackageBundleList(ctx context.Context, kubeconfigFile string) ([]packagesv1.PackageBundle, error) {
err := k.WaitJSONPathLoop(ctx, kubeconfigFile, "5m", "items", "PackageBundle", eksaPackageBundlesType, constants.EksaPackagesName)
if err != nil {
return nil, fmt.Errorf("waiting on package bundle resource to exist %v", err)
}
params := []string{"get", eksaPackageBundlesType, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", constants.EksaPackagesName, "--ignore-not-found=true"}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting package bundle resource %v", err)
}
response := &packagesv1.PackageBundleList{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("unmarshalling kubectl response to GO struct %v", err)
}
return response.Items, nil
}
func (k *Kubectl) DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error {
params := []string{"delete", eksaPackagesBundleControllerType, clusterName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", constants.EksaPackagesName, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting package resources for %s: %v", clusterName, err)
}
params = []string{"delete", "namespace", "eksa-packages-" + clusterName, "--kubeconfig", managementCluster.KubeconfigFile, "--ignore-not-found=true"}
_, err = k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting package resources for %s: %v", clusterName, err)
}
return nil
}
func (k *Kubectl) DeleteSecret(ctx context.Context, managementCluster *types.Cluster, secretName, namespace string) error {
params := []string{"delete", "secret", secretName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", namespace}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting secret %s in namespace %s: %v", secretName, namespace, err)
}
return nil
}
func (k *Kubectl) DeleteOIDCConfig(ctx context.Context, managementCluster *types.Cluster, oidcConfigName, oidcConfigNamespace string) error {
params := []string{"delete", eksaOIDCResourceType, oidcConfigName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", oidcConfigNamespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting oidc config %s apply: %v", oidcConfigName, err)
}
return nil
}
func (k *Kubectl) DeleteAWSIamConfig(ctx context.Context, managementCluster *types.Cluster, awsIamConfigName, awsIamConfigNamespace string) error {
params := []string{"delete", eksaAwsIamResourceType, awsIamConfigName, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", awsIamConfigNamespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting awsIam config %s apply: %v", awsIamConfigName, err)
}
return nil
}
func (k *Kubectl) DeleteCluster(ctx context.Context, managementCluster, clusterToDelete *types.Cluster) error {
params := []string{"delete", capiClustersResourceType, clusterToDelete.Name, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting cluster %s apply: %v", clusterToDelete.Name, err)
}
return nil
}
func (k *Kubectl) ListCluster(ctx context.Context) error {
params := []string{"get", "pods", "-A", "-o", "jsonpath={..image}"}
output, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("listing cluster versions: %v", err)
}
keys := make(map[string]bool)
list := []string{}
for _, entry := range strings.Fields(output.String()) {
if _, found := keys[entry]; !found {
keys[entry] = true
list = append(list, entry)
}
}
sort.Strings(list)
for _, value := range list {
logger.Info(value)
}
return nil
}
func (k *Kubectl) GetNodes(ctx context.Context, kubeconfig string) ([]corev1.Node, error) {
params := []string{"get", "nodes", "-o", "json", "--kubeconfig", kubeconfig}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting nodes: %v", err)
}
response := &corev1.NodeList{}
err = json.Unmarshal(stdOut.Bytes(), response)
return response.Items, err
}
func (k *Kubectl) GetControlPlaneNodes(ctx context.Context, kubeconfig string) ([]corev1.Node, error) {
params := []string{"get", "nodes", "-o", "json", "--kubeconfig", kubeconfig, "--selector=node-role.kubernetes.io/control-plane"}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting control plane nodes: %v", err)
}
response := &corev1.NodeList{}
err = json.Unmarshal(stdOut.Bytes(), response)
return response.Items, err
}
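// ValidateNodes returns an error if any node in the cluster is not in the
// KubeletReady state.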
func (k *Kubectl) ValidateNodes(ctx context.Context, kubeconfig string) error {
template := "{{range .items}}{{.metadata.name}}\n{{end}}"
params := []string{"get", "nodes", "-o", "go-template", "--template", template, "--kubeconfig", kubeconfig}
buffer, err := k.Execute(ctx, params...)
if err != nil {
return err
}
scanner := bufio.NewScanner(strings.NewReader(buffer.String()))
for scanner.Scan() {
node := scanner.Text()
if len(node) != 0 {
template = "{{range .status.conditions}}{{if eq .type \"Ready\"}}{{.reason}}{{end}}{{end}}"
params = []string{"get", "node", node, "-o", "go-template", "--template", template, "--kubeconfig", kubeconfig}
buffer, err = k.Execute(ctx, params...)
if err != nil {
return err
}
if buffer.String() != "KubeletReady" {
return fmt.Errorf("node %s is not ready, currently in %s state", node, buffer.String())
}
}
}
return nil
}
func (k *Kubectl) DeleteOldWorkerNodeGroup(ctx context.Context, md *clusterv1.MachineDeployment, kubeconfig string) error {
kubeadmConfigTemplateName := md.Spec.Template.Spec.Bootstrap.ConfigRef.Name
providerMachineTemplateName := md.Spec.Template.Spec.InfrastructureRef.Name
params := []string{"delete", md.Kind, md.Name, "--kubeconfig", kubeconfig, "--namespace", constants.EksaSystemNamespace}
if _, err := k.Execute(ctx, params...); err != nil {
return err
}
params = []string{"delete", md.Spec.Template.Spec.Bootstrap.ConfigRef.Kind, kubeadmConfigTemplateName, "--kubeconfig", kubeconfig, "--namespace", constants.EksaSystemNamespace}
if _, err := k.Execute(ctx, params...); err != nil {
return err
}
params = []string{"delete", md.Spec.Template.Spec.InfrastructureRef.Kind, providerMachineTemplateName, "--kubeconfig", kubeconfig, "--namespace", constants.EksaSystemNamespace}
if _, err := k.Execute(ctx, params...); err != nil {
return err
}
return nil
}
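// ValidateControlPlaneNodes verifies that the cluster's KubeadmControlPlane
// status is current, ready, and has all replicas available and ready.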
func (k *Kubectl) ValidateControlPlaneNodes(ctx context.Context, cluster *types.Cluster, clusterName string) error {
cp, err := k.GetKubeadmControlPlane(ctx, cluster, clusterName, WithCluster(cluster), WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return err
}
observedGeneration := cp.Status.ObservedGeneration
generation := cp.Generation
if observedGeneration != generation {
return fmt.Errorf("kubeadm control plane %s status needs to be refreshed: observed generation is %d, want %d", cp.Name, observedGeneration, generation)
}
if !cp.Status.Ready {
return errors.New("control plane is not ready")
}
if cp.Status.UnavailableReplicas != 0 {
return fmt.Errorf("%v control plane replicas are unavailable", cp.Status.UnavailableReplicas)
}
if cp.Status.ReadyReplicas != cp.Status.Replicas {
return fmt.Errorf("%v control plane replicas are not ready", cp.Status.Replicas-cp.Status.ReadyReplicas)
}
return nil
}
func (k *Kubectl) ValidateWorkerNodes(ctx context.Context, clusterName string, kubeconfig string) error {
logger.V(6).Info("waiting for nodes", "cluster", clusterName)
ready, total, err := k.CountMachineDeploymentReplicasReady(ctx, clusterName, kubeconfig)
if err != nil {
return err
}
if ready != total {
return fmt.Errorf("%d machine deployment replicas are not ready", total-ready)
}
return nil
}
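// CountMachineDeploymentReplicasReady sums ready and total replicas across the
// cluster's machine deployments, erroring if any deployment is not Running or
// reports unavailable replicas.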
func (k *Kubectl) CountMachineDeploymentReplicasReady(ctx context.Context, clusterName string, kubeconfig string) (ready, total int, err error) {
logger.V(6).Info("counting ready machine deployment replicas", "cluster", clusterName)
deployments, err := k.GetMachineDeploymentsForCluster(ctx, clusterName, WithKubeconfig(kubeconfig), WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return 0, 0, err
}
for _, machineDeployment := range deployments {
if machineDeployment.Status.Phase != "Running" {
return 0, 0, fmt.Errorf("machine deployment is in %s phase", machineDeployment.Status.Phase)
}
if machineDeployment.Status.UnavailableReplicas != 0 {
return 0, 0, fmt.Errorf("%d machine deployment replicas are unavailable", machineDeployment.Status.UnavailableReplicas)
}
ready += int(machineDeployment.Status.ReadyReplicas)
total += int(machineDeployment.Status.Replicas)
}
return ready, total, nil
}
func (k *Kubectl) VsphereWorkerNodesMachineTemplate(ctx context.Context, clusterName string, kubeconfig string, namespace string) (*vspherev1.VSphereMachineTemplate, error) {
machineTemplateName, err := k.MachineTemplateName(ctx, clusterName, kubeconfig, WithNamespace(namespace))
if err != nil {
return nil, err
}
params := []string{"get", vsphereMachineTemplatesType, machineTemplateName, "-o", "go-template", "--template", "{{.spec.template.spec}}", "-o", "yaml", "--kubeconfig", kubeconfig, "--namespace", namespace}
buffer, err := k.Execute(ctx, params...)
if err != nil {
return nil, err
}
machineTemplateSpec := &vspherev1.VSphereMachineTemplate{}
if err := yaml.Unmarshal(buffer.Bytes(), machineTemplateSpec); err != nil {
return nil, err
}
return machineTemplateSpec, nil
}
func (k *Kubectl) CloudstackWorkerNodesMachineTemplate(ctx context.Context, clusterName string, kubeconfig string, namespace string) (*cloudstackv1.CloudStackMachineTemplate, error) {
machineTemplateName, err := k.MachineTemplateName(ctx, clusterName, kubeconfig, WithNamespace(namespace))
if err != nil {
return nil, err
}
params := []string{"get", cloudstackMachineTemplatesType, machineTemplateName, "-o", "go-template", "--template", "{{.spec.template.spec}}", "-o", "yaml", "--kubeconfig", kubeconfig, "--namespace", namespace}
buffer, err := k.Execute(ctx, params...)
if err != nil {
return nil, err
}
machineTemplateSpec := &cloudstackv1.CloudStackMachineTemplate{}
if err := yaml.Unmarshal(buffer.Bytes(), machineTemplateSpec); err != nil {
return nil, err
}
return machineTemplateSpec, nil
}
func (k *Kubectl) MachineTemplateName(ctx context.Context, clusterName string, kubeconfig string, opts ...KubectlOpt) (string, error) {
template := "{{.spec.template.spec.infrastructureRef.name}}"
params := []string{"get", capiMachineDeploymentsType, fmt.Sprintf("%s-md-0", clusterName), "-o", "go-template", "--template", template, "--kubeconfig", kubeconfig}
applyOpts(¶ms, opts...)
buffer, err := k.Execute(ctx, params...)
if err != nil {
return "", err
}
return buffer.String(), nil
}
func (k *Kubectl) ValidatePods(ctx context.Context, kubeconfig string) error {
template := "{{range .items}}{{.metadata.name}},{{.status.phase}}\n{{end}}"
params := []string{"get", "pods", "-A", "-o", "go-template", "--template", template, "--kubeconfig", kubeconfig}
buffer, err := k.Execute(ctx, params...)
if err != nil {
return err
}
scanner := bufio.NewScanner(strings.NewReader(buffer.String()))
for scanner.Scan() {
data := strings.Split(scanner.Text(), ",")
if len(data) == 2 {
if data[1] != "Running" {
return fmt.Errorf("pod %s is not running, currently in %s phase", data[0], data[1])
}
}
}
logger.Info("All pods are running")
return nil
}
// RunBusyBoxPod will run Kubectl run with a busybox curl image and the command you pass in.
func (k *Kubectl) RunBusyBoxPod(ctx context.Context, namespace, name, kubeconfig string, command []string) (string, error) {
params := []string{"run", name, "--image=yauritux/busybox-curl", "-o", "json", "--kubeconfig", kubeconfig, "--namespace", namespace, "--restart=Never"}
params = append(params, command...)
_, err := k.Execute(ctx, params...)
if err != nil {
return "", err
}
	return name, nil
}
// GetPodNameByLabel will return the name of the first pod that matches the label.
func (k *Kubectl) GetPodNameByLabel(ctx context.Context, namespace, label, kubeconfig string) (string, error) {
params := []string{"get", "pod", "-l=" + label, "-o=jsonpath='{.items[0].metadata.name}'", "--kubeconfig", kubeconfig, "--namespace", namespace}
podName, err := k.Execute(ctx, params...)
if err != nil {
return "", err
}
	return strings.Trim(podName.String(), `'"`), nil
}
// GetPodIP will return the ip of the pod.
func (k *Kubectl) GetPodIP(ctx context.Context, namespace, podName, kubeconfig string) (string, error) {
params := []string{"get", "pod", podName, "-o=jsonpath='{.status.podIP}'", "--kubeconfig", kubeconfig, "--namespace", namespace}
ip, err := k.Execute(ctx, params...)
if err != nil {
return "", err
}
	return strings.Trim(ip.String(), `'"`), nil
}
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func (k *Kubectl) GetPodLogs(ctx context.Context, namespace, podName, containerName, kubeconfig string) (string, error) {
return k.getPodLogs(ctx, namespace, podName, containerName, kubeconfig, nil, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func (k *Kubectl) GetPodLogsSince(ctx context.Context, namespace, podName, containerName, kubeconfig string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return k.getPodLogs(ctx, namespace, podName, containerName, kubeconfig, &sinceTime, nil)
}
func (k *Kubectl) getPodLogs(ctx context.Context, namespace, podName, containerName, kubeconfig string, sinceTime *metav1.Time, tailLines *int) (string, error) {
params := []string{"logs", podName, containerName, "--kubeconfig", kubeconfig, "--namespace", namespace}
if sinceTime != nil {
params = append(params, "--since-time", sinceTime.Format(time.RFC3339))
}
if tailLines != nil {
params = append(params, "--tail", strconv.Itoa(*tailLines))
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return "", err
}
	logs := stdOut.String()
	if strings.Contains(logs, "Internal Error") {
		return "", fmt.Errorf("fetched log contains \"Internal Error\": %q", logs)
	}
	return logs, nil
}
func (k *Kubectl) SaveLog(ctx context.Context, cluster *types.Cluster, deployment *types.Deployment, fileName string, writer filewriter.FileWriter) error {
params := []string{"--kubeconfig", cluster.KubeconfigFile}
logParams := []string{
"logs",
fmt.Sprintf("deployment/%s", deployment.Name),
"-n",
deployment.Namespace,
}
if deployment.Container != "" {
logParams = append(logParams, "-c", deployment.Container)
}
params = append(params, logParams...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("saving logs: %v", err)
}
_, err = writer.Write(fileName, stdOut.Bytes())
if err != nil {
return err
}
return nil
}
type machinesResponse struct {
Items []types.Machine `json:"items,omitempty"`
}
func (k *Kubectl) GetMachines(ctx context.Context, cluster *types.Cluster, clusterName string) ([]types.Machine, error) {
params := []string{
"get", capiMachinesType, "-o", "json", "--kubeconfig", cluster.KubeconfigFile,
"--selector=cluster.x-k8s.io/cluster-name=" + clusterName,
"--namespace", constants.EksaSystemNamespace,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting machines: %v", err)
}
response := &machinesResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get machines response: %v", err)
}
return response.Items, nil
}
type machineSetResponse struct {
Items []clusterv1.MachineSet `json:"items,omitempty"`
}
func (k *Kubectl) GetMachineSets(ctx context.Context, machineDeploymentName string, cluster *types.Cluster) ([]clusterv1.MachineSet, error) {
params := []string{
"get", capiMachineSetsType, "-o", "json", "--kubeconfig", cluster.KubeconfigFile,
"--selector=cluster.x-k8s.io/deployment-name=" + machineDeploymentName,
"--namespace", constants.EksaSystemNamespace,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting machineset associated with deployment %s: %v", machineDeploymentName, err)
}
response := &machineSetResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get machinesets response: %v", err)
}
return response.Items, nil
}
type ClustersResponse struct {
Items []types.CAPICluster `json:"items,omitempty"`
}
type GitOpsConfigResponse struct {
Items []*v1alpha1.GitOpsConfig `json:"items,omitempty"`
}
type VSphereDatacenterConfigResponse struct {
Items []*v1alpha1.VSphereDatacenterConfig `json:"items,omitempty"`
}
type CloudStackDatacenterConfigResponse struct {
Items []*v1alpha1.CloudStackDatacenterConfig `json:"items,omitempty"`
}
// TinkerbellDatacenterConfigResponse contains list of TinkerbellDatacenterConfig.
type TinkerbellDatacenterConfigResponse struct {
Items []*v1alpha1.TinkerbellDatacenterConfig `json:"items,omitempty"`
}
type NutanixDatacenterConfigResponse struct {
Items []*v1alpha1.NutanixDatacenterConfig `json:"items,omitempty"`
}
type IdentityProviderConfigResponse struct {
Items []*v1alpha1.Ref `json:"items,omitempty"`
}
type VSphereMachineConfigResponse struct {
Items []*v1alpha1.VSphereMachineConfig `json:"items,omitempty"`
}
type CloudStackMachineConfigResponse struct {
Items []*v1alpha1.CloudStackMachineConfig `json:"items,omitempty"`
}
// TinkerbellMachineConfigResponse contains list of TinkerbellMachineConfig.
type TinkerbellMachineConfigResponse struct {
Items []*v1alpha1.TinkerbellMachineConfig `json:"items,omitempty"`
}
type NutanixMachineConfigResponse struct {
Items []*v1alpha1.NutanixMachineConfig `json:"items,omitempty"`
}
func (k *Kubectl) ValidateClustersCRD(ctx context.Context, cluster *types.Cluster) error {
params := []string{"get", "customresourcedefinition", capiClustersResourceType, "--kubeconfig", cluster.KubeconfigFile}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("getting clusters crd: %v", err)
}
return nil
}
func (k *Kubectl) ValidateEKSAClustersCRD(ctx context.Context, cluster *types.Cluster) error {
params := []string{"get", "customresourcedefinition", eksaClusterResourceType, "--kubeconfig", cluster.KubeconfigFile}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("getting eksa clusters crd: %v", err)
}
return nil
}
func (k *Kubectl) RolloutRestartDaemonSet(ctx context.Context, dsName, dsNamespace, kubeconfig string) error {
params := []string{
"rollout", "restart", "daemonset", dsName,
"--kubeconfig", kubeconfig, "--namespace", dsNamespace,
}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("restarting %s daemonset in namespace %s: %v", dsName, dsNamespace, err)
}
return nil
}
func (k *Kubectl) SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error {
params := []string{
"set", "env", "deployment/eksa-controller-manager", fmt.Sprintf("%s=%s", envVar, envVarVal),
"--kubeconfig", kubeconfig, "--namespace", constants.EksaSystemNamespace,
}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("setting %s=%s on eksa controller: %v", envVar, envVarVal, err)
}
return nil
}
func (k *Kubectl) GetClusters(ctx context.Context, cluster *types.Cluster) ([]types.CAPICluster, error) {
params := []string{"get", capiClustersResourceType, "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting clusters: %v", err)
}
response := &ClustersResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get clusters response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) GetApiServerUrl(ctx context.Context, cluster *types.Cluster) (string, error) {
params := []string{"config", "view", "--kubeconfig", cluster.KubeconfigFile, "--minify", "--raw", "-o", "jsonpath={.clusters[0].cluster.server}"}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return "", fmt.Errorf("getting api server url: %v", err)
}
return stdOut.String(), nil
}
func (k *Kubectl) Version(ctx context.Context, cluster *types.Cluster) (*VersionResponse, error) {
params := []string{"version", "-o", "json", "--kubeconfig", cluster.KubeconfigFile}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("executing kubectl version: %v", err)
}
response := &VersionResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("unmarshalling kubectl version response: %v", err)
}
return response, nil
}
type KubectlOpt func(*[]string)
// WithToken is a kubectl option to pass a token when making a kubectl call.
func WithToken(t string) KubectlOpt {
return appendOpt("--token", t)
}
func WithServer(s string) KubectlOpt {
return appendOpt("--server", s)
}
func WithCluster(c *types.Cluster) KubectlOpt {
return WithKubeconfig(c.KubeconfigFile)
}
func WithKubeconfig(kubeconfigFile string) KubectlOpt {
return appendOpt("--kubeconfig", kubeconfigFile)
}
func WithNamespace(n string) KubectlOpt {
return appendOpt("--namespace", n)
}
// WithResourceName is a kubectl option to pass a resource name when making a kubectl call.
func WithResourceName(name string) KubectlOpt {
return appendOpt(name)
}
// WithAllNamespaces is a kubectl option to add all namespaces when making a kubectl call.
func WithAllNamespaces() KubectlOpt {
return appendOpt("-A")
}
func WithSkipTLSVerify() KubectlOpt {
return appendOpt("--insecure-skip-tls-verify=true")
}
func WithOverwrite() KubectlOpt {
return appendOpt("--overwrite")
}
func WithWaitAll() KubectlOpt {
return appendOpt("--all")
}
// WithSelector is a kubectl option to pass a selector when making kubectl calls.
func WithSelector(selector string) KubectlOpt {
return appendOpt("--selector=" + selector)
}
func appendOpt(new ...string) KubectlOpt {
return func(args *[]string) {
*args = append(*args, new...)
}
}
func applyOpts(params *[]string, opts ...KubectlOpt) {
for _, opt := range opts {
opt(params)
}
}
func (k *Kubectl) GetPods(ctx context.Context, opts ...KubectlOpt) ([]corev1.Pod, error) {
params := []string{"get", "pods", "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting pods: %v", err)
}
response := &corev1.PodList{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get pods response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) GetDeployments(ctx context.Context, opts ...KubectlOpt) ([]appsv1.Deployment, error) {
params := []string{"get", "deployments", "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting deployments: %v", err)
}
response := &appsv1.DeploymentList{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get deployments response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) GetSecretFromNamespace(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.Secret, error) {
obj := &corev1.Secret{}
if err := k.GetObject(ctx, "secret", name, namespace, kubeconfigFile, obj); err != nil {
return nil, err
}
return obj, nil
}
func (k *Kubectl) GetSecret(ctx context.Context, secretObjectName string, opts ...KubectlOpt) (*corev1.Secret, error) {
params := []string{"get", "secret", secretObjectName, "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting secret: %v", err)
}
response := &corev1.Secret{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get secret response: %v", err)
}
return response, err
}
func (k *Kubectl) GetKubeadmControlPlanes(ctx context.Context, opts ...KubectlOpt) ([]controlplanev1.KubeadmControlPlane, error) {
params := []string{"get", kubeadmControlPlaneResourceType, "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting kubeadmcontrolplanes: %v", err)
}
response := &controlplanev1.KubeadmControlPlaneList{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get kubeadmcontrolplanes response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...KubectlOpt) (*controlplanev1.KubeadmControlPlane, error) {
logger.V(6).Info("Getting KubeadmControlPlane CRDs", "cluster", clusterName)
params := []string{"get", kubeadmControlPlaneResourceType, clusterName, "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting kubeadmcontrolplane: %v", err)
}
response := &controlplanev1.KubeadmControlPlane{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get kubeadmcontrolplane response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...KubectlOpt) (*clusterv1.MachineDeployment, error) {
params := []string{"get", capiMachineDeploymentsType, workerNodeGroupName, "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting machine deployment: %v", err)
}
response := &clusterv1.MachineDeployment{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get machineDeployment response: %v", err)
}
return response, nil
}
// GetMachineDeployments retrieves all Machine Deployments.
func (k *Kubectl) GetMachineDeployments(ctx context.Context, opts ...KubectlOpt) ([]clusterv1.MachineDeployment, error) {
params := []string{"get", capiMachineDeploymentsType, "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting machine deployments: %v", err)
}
response := &clusterv1.MachineDeploymentList{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get machineDeployments response: %v", err)
}
return response.Items, nil
}
// GetMachineDeploymentsForCluster retrieves all the Machine Deployments for a cluster with name "clusterName".
func (k *Kubectl) GetMachineDeploymentsForCluster(ctx context.Context, clusterName string, opts ...KubectlOpt) ([]clusterv1.MachineDeployment, error) {
return k.GetMachineDeployments(ctx, append(opts, WithSelector(fmt.Sprintf("cluster.x-k8s.io/cluster-name=%s", clusterName)))...)
}
func (k *Kubectl) UpdateEnvironmentVariables(ctx context.Context, resourceType, resourceName string, envMap map[string]string, opts ...KubectlOpt) error {
params := []string{"set", "env", resourceType, resourceName}
for k, v := range envMap {
params = append(params, fmt.Sprintf("%s=%s", k, v))
}
applyOpts(¶ms, opts...)
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("setting the environment variables in %s %s: %v", resourceType, resourceName, err)
}
return nil
}
func (k *Kubectl) UpdateEnvironmentVariablesInNamespace(ctx context.Context, resourceType, resourceName string, envMap map[string]string, cluster *types.Cluster, namespace string) error {
return k.UpdateEnvironmentVariables(ctx, resourceType, resourceName, envMap, WithCluster(cluster), WithNamespace(namespace))
}
func (k *Kubectl) UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...KubectlOpt) error {
params := []string{"annotate", resourceType, objectName}
for k, v := range annotations {
params = append(params, fmt.Sprintf("%s=%s", k, v))
}
applyOpts(¶ms, opts...)
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("updating annotation: %v", err)
}
return nil
}
func (k *Kubectl) UpdateAnnotationInNamespace(ctx context.Context, resourceType, objectName string, annotations map[string]string, cluster *types.Cluster, namespace string) error {
return k.UpdateAnnotation(ctx, resourceType, objectName, annotations, WithOverwrite(), WithCluster(cluster), WithNamespace(namespace))
}
func (k *Kubectl) RemoveAnnotation(ctx context.Context, resourceType, objectName string, key string, opts ...KubectlOpt) error {
params := []string{"annotate", resourceType, objectName, fmt.Sprintf("%s-", key)}
applyOpts(¶ms, opts...)
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("removing annotation: %v", err)
}
return nil
}
func (k *Kubectl) RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error {
return k.RemoveAnnotation(ctx, resourceType, objectName, key, WithCluster(cluster), WithNamespace(namespace))
}
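// GetEksaCluster retrieves an eks-a Cluster by name across all namespaces. The
// first query extracts .items[0] with jsonpath, which fails when no cluster
// matches; the follow-up query without jsonpath distinguishes a missing
// cluster from a genuine kubectl failure.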
func (k *Kubectl) GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error) {
params := []string{"get", eksaClusterResourceType, "-A", "-o", "jsonpath={.items[0]}", "--kubeconfig", cluster.KubeconfigFile, "--field-selector=metadata.name=" + clusterName}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
params := []string{"get", eksaClusterResourceType, "-A", "--kubeconfig", cluster.KubeconfigFile, "--field-selector=metadata.name=" + clusterName}
stdOut, err = k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa cluster: %v", err)
}
return nil, fmt.Errorf("cluster %s not found of custom resource type %s", clusterName, eksaClusterResourceType)
}
response := &v1alpha1.Cluster{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get eksa cluster response: %v", err)
}
return response, nil
}
func (k *Kubectl) SearchVsphereMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereMachineConfig, error) {
params := []string{
"get", eksaVSphereMachineResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa VSphereMachineConfigResponse: %v", err)
}
response := &VSphereMachineConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing VSphereMachineConfigResponse response: %v", err)
}
return response.Items, nil
}
// SearchTinkerbellMachineConfig returns the list of TinkerbellMachineConfig in the cluster.
func (k *Kubectl) SearchTinkerbellMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.TinkerbellMachineConfig, error) {
params := []string{
"get", eksaTinkerbellMachineResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa TinkerbellMachineConfigResponse: %v", err)
}
response := &TinkerbellMachineConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing TinkerbellMachineConfigResponse response: %v", err)
}
return response.Items, nil
}
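// SearchIdentityProviderConfig looks up an OIDCConfig or AWSIamConfig with the
// given name. Note that the results are decoded into a
// VSphereDatacenterConfigResponse, so callers should rely only on the object
// metadata of the returned items rather than any provider-specific spec fields.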
func (k *Kubectl) SearchIdentityProviderConfig(ctx context.Context, ipName string, kind string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error) {
var internalType string
switch kind {
case v1alpha1.OIDCConfigKind:
internalType = fmt.Sprintf("oidcconfigs.%s", v1alpha1.GroupVersion.Group)
case v1alpha1.AWSIamConfigKind:
internalType = fmt.Sprintf("awsiamconfigs.%s", v1alpha1.GroupVersion.Group)
default:
return nil, fmt.Errorf("invalid identity provider %s", kind)
}
params := []string{
"get", internalType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + ipName,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa IdentityProvider: %v", err)
}
response := &VSphereDatacenterConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing IdentityProviderConfigResponse response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) SearchVsphereDatacenterConfig(ctx context.Context, datacenterName string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error) {
params := []string{
"get", eksaVSphereDatacenterResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + datacenterName,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa VSphereDatacenterConfigResponse: %v", err)
}
response := &VSphereDatacenterConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing VSphereDatacenterConfigResponse response: %v", err)
}
return response.Items, nil
}
// SearchTinkerbellDatacenterConfig returns the list of TinkerbellDatacenterConfig in the cluster.
func (k *Kubectl) SearchTinkerbellDatacenterConfig(ctx context.Context, datacenterName string, kubeconfigFile string, namespace string) ([]*v1alpha1.TinkerbellDatacenterConfig, error) {
params := []string{
"get", eksaTinkerbellDatacenterResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + datacenterName,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa TinkerbellDatacenterConfigResponse: %v", err)
}
response := &TinkerbellDatacenterConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing TinkerbellDatacenterConfigResponse response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) GetEksaFluxConfig(ctx context.Context, gitOpsConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.FluxConfig, error) {
params := []string{"get", eksaFluxConfigResourceType, gitOpsConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa FluxConfig: %v", err)
}
response := &v1alpha1.FluxConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing FluxConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaGitOpsConfig(ctx context.Context, gitOpsConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.GitOpsConfig, error) {
params := []string{"get", eksaGitOpsResourceType, gitOpsConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa GitOpsConfig: %v", err)
}
response := &v1alpha1.GitOpsConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing GitOpsConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaOIDCConfig(ctx context.Context, oidcConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.OIDCConfig, error) {
params := []string{"get", eksaOIDCResourceType, oidcConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa OIDCConfig: %v", err)
}
response := &v1alpha1.OIDCConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing OIDCConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaAWSIamConfig(ctx context.Context, awsIamConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.AWSIamConfig, error) {
params := []string{"get", eksaAwsIamResourceType, awsIamConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa AWSIamConfig: %v", err)
}
response := &v1alpha1.AWSIamConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing AWSIamConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaTinkerbellDatacenterConfig(ctx context.Context, tinkerbellDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.TinkerbellDatacenterConfig, error) {
params := []string{"get", eksaTinkerbellDatacenterResourceType, tinkerbellDatacenterConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa TinkerbellDatacenterConfig %v", err)
}
response := &v1alpha1.TinkerbellDatacenterConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get eksa TinkerbellDatacenterConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error) {
params := []string{"get", eksaVSphereDatacenterResourceType, vsphereDatacenterConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa VSphereDatacenterConfig: %v", err)
}
response := &v1alpha1.VSphereDatacenterConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get eksa VSphereDatacenterConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaTinkerbellMachineConfig(ctx context.Context, tinkerbellMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.TinkerbellMachineConfig, error) {
params := []string{"get", eksaTinkerbellMachineResourceType, tinkerbellMachineConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa TinkerbellMachineConfig %v", err)
}
response := &v1alpha1.TinkerbellMachineConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get eksa TinkerbellMachineConfig response: %v", err)
}
return response, nil
}
// GetUnprovisionedTinkerbellHardware retrieves unprovisioned Tinkerbell Hardware objects.
// Unprovisioned objects are those without any owner reference information.
func (k *Kubectl) GetUnprovisionedTinkerbellHardware(ctx context.Context, kubeconfig, namespace string) ([]tinkv1alpha1.Hardware, error) {
// Retrieve hardware resources that don't have the `v1alpha1.tinkerbell.org/ownerName` label.
// This label is used to populate hardware when the CAPT controller acquires the Hardware
// resource for provisioning.
// See https://github.com/chrisdoherty4/cluster-api-provider-tinkerbell/blob/main/controllers/machine.go#L271
params := []string{
"get", TinkerbellHardwareResourceType,
"-l", "!v1alpha1.tinkerbell.org/ownerName",
"--kubeconfig", kubeconfig,
"-o", "json",
"--namespace", namespace,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, err
}
var list tinkv1alpha1.HardwareList
if err := json.Unmarshal(stdOut.Bytes(), &list); err != nil {
return nil, err
}
return list.Items, nil
}
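// For reference, GetUnprovisionedTinkerbellHardware is roughly equivalent to
// the following invocation (kubeconfig and namespace values illustrative):
//
//	kubectl get <TinkerbellHardwareResourceType> -l '!v1alpha1.tinkerbell.org/ownerName' \
//	  --kubeconfig mgmt.kubeconfig -o json --namespace eksa-system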
// GetProvisionedTinkerbellHardware retrieves provisioned Tinkerbell Hardware objects.
// Provisioned objects are those with owner reference information.
func (k *Kubectl) GetProvisionedTinkerbellHardware(ctx context.Context, kubeconfig, namespace string) ([]tinkv1alpha1.Hardware, error) {
// Retrieve hardware resources that have the `v1alpha1.tinkerbell.org/ownerName` label.
// This label is used to populate hardware when the CAPT controller acquires the Hardware
// resource for provisioning.
params := []string{
"get", TinkerbellHardwareResourceType,
"-l", "v1alpha1.tinkerbell.org/ownerName",
"--kubeconfig", kubeconfig,
"-o", "json",
"--namespace", namespace,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, err
}
var list tinkv1alpha1.HardwareList
if err := json.Unmarshal(stdOut.Bytes(), &list); err != nil {
return nil, err
}
return list.Items, nil
}
func (k *Kubectl) GetEksaVSphereMachineConfig(ctx context.Context, vsphereMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, error) {
params := []string{"get", eksaVSphereMachineResourceType, vsphereMachineConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa VSphereMachineConfig: %v", err)
}
response := &v1alpha1.VSphereMachineConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get eksa VSphereMachineConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaAWSDatacenterConfig(ctx context.Context, awsDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.AWSDatacenterConfig, error) {
params := []string{"get", eksaAwsResourceType, awsDatacenterConfigName, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting eksa AWSDatacenterConfig: %v", err)
}
response := &v1alpha1.AWSDatacenterConfig{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get eksa AWSDatacenterConfig response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetCurrentClusterContext(ctx context.Context, cluster *types.Cluster) (string, error) {
params := []string{"config", "view", "--kubeconfig", cluster.KubeconfigFile, "--minify", "--raw", "-o", "jsonpath={.contexts[0].name}"}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return "", fmt.Errorf("getting current cluster context name: %v", err)
}
return stdOut.String(), nil
}
func (k *Kubectl) GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...KubectlOpt) (*etcdv1.EtcdadmCluster, error) {
logger.V(6).Info("Getting EtcdadmCluster CRD", "cluster", clusterName)
params := []string{"get", etcdadmClustersResourceType, fmt.Sprintf("%s-etcd", clusterName), "-o", "json"}
applyOpts(¶ms, opts...)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting etcdadmCluster: %v", err)
}
response := &etcdv1.EtcdadmCluster{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing get etcdadmCluster response: %v", err)
}
return response, nil
}
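// ValidateNodesVersion verifies that every node's kubelet version contains the
// expected Kubernetes version string. For example (illustrative value), a
// kubelet reporting "v1.25.6-eks-1a2b3c" matches kubeVersion "1.25".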
func (k *Kubectl) ValidateNodesVersion(ctx context.Context, kubeconfig string, kubeVersion v1alpha1.KubernetesVersion) error {
template := "{{range .items}}{{.status.nodeInfo.kubeletVersion}}\n{{end}}"
params := []string{"get", "nodes", "-o", "go-template", "--template", template, "--kubeconfig", kubeconfig}
buffer, err := k.Execute(ctx, params...)
if err != nil {
return err
}
scanner := bufio.NewScanner(strings.NewReader(buffer.String()))
for scanner.Scan() {
kubeletVersion := scanner.Text()
if len(kubeletVersion) != 0 {
if !strings.Contains(kubeletVersion, string(kubeVersion)) {
return fmt.Errorf("validating node version: kubernetes version %s does not match expected version %s", kubeletVersion, kubeVersion)
}
}
}
return nil
}
func (k *Kubectl) GetBundles(ctx context.Context, kubeconfigFile, name, namespace string) (*releasev1alpha1.Bundles, error) {
params := []string{"get", bundlesResourceType, name, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting Bundles with kubectl: %v", err)
}
response := &releasev1alpha1.Bundles{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing Bundles response: %v", err)
}
return response, nil
}
func (k *Kubectl) GetClusterResourceSet(ctx context.Context, kubeconfigFile, name, namespace string) (*addons.ClusterResourceSet, error) {
obj := &addons.ClusterResourceSet{}
if err := k.GetObject(ctx, clusterResourceSetResourceType, name, namespace, kubeconfigFile, obj); err != nil {
return nil, err
}
return obj, nil
}
func (k *Kubectl) GetConfigMap(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.ConfigMap, error) {
params := []string{"get", "configmap", name, "-o", "json", "--kubeconfig", kubeconfigFile, "--namespace", namespace}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("getting ConfigMap with kubectl: %v", err)
}
response := &corev1.ConfigMap{}
if err = json.Unmarshal(stdOut.Bytes(), response); err != nil {
return nil, fmt.Errorf("parsing ConfigMap response: %v", err)
}
return response, nil
}
func (k *Kubectl) SetDaemonSetImage(ctx context.Context, kubeconfigFile, name, namespace, container, image string) error {
return k.setImage(ctx, "daemonset", name, container, image, WithNamespace(namespace), WithKubeconfig(kubeconfigFile))
}
func (k *Kubectl) setImage(ctx context.Context, kind, name, container, image string, opts ...KubectlOpt) error {
params := []string{"set", "image", fmt.Sprintf("%s/%s", kind, name), fmt.Sprintf("%s=%s", container, image)}
applyOpts(¶ms, opts...)
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("setting image for %s: %v", kind, err)
}
return nil
}
func (k *Kubectl) CheckProviderExists(ctx context.Context, kubeconfigFile, name, namespace string) (bool, error) {
params := []string{"get", "namespace", fmt.Sprintf("--field-selector=metadata.name=%s", namespace), "--kubeconfig", kubeconfigFile}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return false, fmt.Errorf("checking whether provider namespace exists: %v", err)
}
if stdOut.Len() == 0 {
return false, nil
}
params = []string{"get", capiProvidersResourceType, "--namespace", namespace, fmt.Sprintf("--field-selector=metadata.name=%s", name), "--kubeconfig", kubeconfigFile}
stdOut, err = k.Execute(ctx, params...)
if err != nil {
return false, fmt.Errorf("checking whether provider exists: %v", err)
}
return stdOut.Len() != 0, nil
}
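// Toleration mirrors the fields of a Kubernetes toleration that this package
// reads from existing resources and serializes into JSON patch payloads.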
type Toleration struct {
Effect string `json:"effect,omitempty"`
Key string `json:"key,omitempty"`
Operator string `json:"operator,omitempty"`
Value string `json:"value,omitempty"`
TolerationSeconds json.Number `json:"tolerationSeconds,omitempty"`
}
func (k *Kubectl) ApplyTolerationsFromTaintsToDaemonSet(ctx context.Context, oldTaints []corev1.Taint, newTaints []corev1.Taint, dsName string, kubeconfigFile string) error {
return k.ApplyTolerationsFromTaints(ctx, oldTaints, newTaints, "ds", dsName, kubeconfigFile, "kube-system", "/spec/template/spec/tolerations")
}
func (k *Kubectl) ApplyTolerationsFromTaints(ctx context.Context, oldTaints []corev1.Taint, newTaints []corev1.Taint, resource string, name string, kubeconfigFile string, namespace string, path string) error {
params := []string{
"get", resource, name,
"-o", "jsonpath={range .spec.template.spec}{.tolerations} {end}",
"-n", namespace, "--kubeconfig", kubeconfigFile,
}
output, err := k.Execute(ctx, params...)
if err != nil {
return err
}
var appliedTolerations []Toleration
if len(output.String()) > 0 {
err = json.Unmarshal(output.Bytes(), &appliedTolerations)
if err != nil {
return fmt.Errorf("parsing toleration response: %v", err)
}
}
oldTolerationSet := make(map[Toleration]bool)
for _, taint := range oldTaints {
var toleration Toleration
toleration.Key = taint.Key
toleration.Value = taint.Value
toleration.Effect = string(taint.Effect)
toleration.Operator = "Equal"
oldTolerationSet[toleration] = true
}
var finalTolerations []string
// tolerationSeconds is optional; it must be omitted entirely when empty so the
// resulting patch payload stays valid JSON.
format := "{\"key\":\"%s\",\"operator\":\"%s\",\"value\":\"%s\",\"effect\":\"%s\"%s}"
secondsFormat := ",\"tolerationSeconds\":%s"
for _, toleration := range appliedTolerations {
_, present := oldTolerationSet[toleration]
if !present {
seconds := ""
if toleration.TolerationSeconds != "" {
seconds = fmt.Sprintf(secondsFormat, string(toleration.TolerationSeconds))
}
finalTolerations = append(finalTolerations, fmt.Sprintf(format, toleration.Key, toleration.Operator, toleration.Value, toleration.Effect, seconds))
}
}
for _, taint := range newTaints {
finalTolerations = append(finalTolerations, fmt.Sprintf(format, taint.Key, "Equal", taint.Value, taint.Effect, ""))
}
if len(finalTolerations) > 0 {
params := []string{
"patch", resource, name,
"--type=json", fmt.Sprintf("-p=[{\"op\": \"add\", \"path\": \"%s\", \"value\":[%s]}]", path, strings.Join(finalTolerations, ", ")), "-n", namespace, "--kubeconfig", kubeconfigFile,
}
_, err = k.Execute(ctx, params...)
if err != nil {
return err
}
}
return nil
}
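// As an illustration, ApplyTolerationsFromTaints for a single new taint
// {Key: "dedicated", Value: "etcd", Effect: "NoExecute"} (values hypothetical)
// issues a patch roughly equivalent to:
//
//	kubectl patch ds <name> --type=json \
//	  -p='[{"op": "add", "path": "/spec/template/spec/tolerations", "value":[{"key":"dedicated","operator":"Equal","value":"etcd","effect":"NoExecute"}]}]'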
// PauseCAPICluster sets `spec.paused: true` on the CAPI cluster resource. This causes all
// downstream CAPI + provider controllers to skip reconciling the paused cluster's objects.
func (k *Kubectl) PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error {
patch := fmt.Sprintf("{\"spec\":{\"paused\":%t}}", true)
return k.MergePatchResource(ctx, capiClustersResourceType, cluster, patch, kubeconfig, constants.EksaSystemNamespace)
}
// ResumeCAPICluster removes the `spec.Paused` field from the CAPI cluster resource. This causes all
// downstream CAPI + provider controllers to resume reconciling the paused cluster's objects.
// `spec.Paused` is set to `null` to drop the field instead of setting it to `false`.
func (k *Kubectl) ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error {
patch := "{\"spec\":{\"paused\":null}}"
return k.MergePatchResource(ctx, capiClustersResourceType, cluster, patch, kubeconfig, constants.EksaSystemNamespace)
}
// MergePatchResource patches named resource using merge patch.
func (k *Kubectl) MergePatchResource(ctx context.Context, resource, name, patch, kubeconfig, namespace string) error {
params := []string{
"patch", resource, name, "--type=merge", "-p", patch, "--kubeconfig", kubeconfig, "--namespace", namespace,
}
_, err := k.Execute(ctx, params...)
if err != nil {
return err
}
return nil
}
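// As an example, PauseCAPICluster("mgmt", kubeconfig) runs the equivalent of
// the following (cluster name hypothetical):
//
//	kubectl patch clusters.cluster.x-k8s.io mgmt --type=merge \
//	  -p '{"spec":{"paused":true}}' --kubeconfig <kubeconfig> --namespace eksa-system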
func (k *Kubectl) KubeconfigSecretAvailable(ctx context.Context, kubeconfig string, clusterName string, namespace string) (bool, error) {
return k.HasResource(ctx, "secret", fmt.Sprintf("%s-kubeconfig", clusterName), kubeconfig, namespace)
}
// HasResource implements KubectlRunner.
func (k *Kubectl) HasResource(ctx context.Context, resourceType string, name string, kubeconfig string, namespace string) (bool, error) {
throwaway := &unstructured.Unstructured{}
err := k.Get(ctx, resourceType, kubeconfig, throwaway, withGetResourceName(name), withNamespaceOrDefaultForGet(namespace))
if err != nil {
return false, err
}
return true, nil
}
// GetObject performs a GET call to the kube API server authenticating with a kubeconfig file
// and unmarshalls the response into the provided Object
// If the object is not found, it returns an error implementing apimachinery errors.APIStatus.
func (k *Kubectl) GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error {
return k.Get(ctx, resourceType, kubeconfig, obj, withGetResourceName(name), withNamespaceOrDefaultForGet(namespace))
}
// GetClusterObject performs a GET call like GetObject, but for cluster-scoped resources, so no namespace is required.
func (k *Kubectl) GetClusterObject(ctx context.Context, resourceType, name, kubeconfig string, obj runtime.Object) error {
return k.Get(ctx, resourceType, kubeconfig, obj, withGetResourceName(name), withClusterScope())
}
func (k *Kubectl) ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error {
return k.Get(ctx, resourceType, kubeconfig, list, withNamespaceOrDefaultForGet(namespace))
}
func withGetResourceName(name string) kubernetes.KubectlGetOption {
return &kubernetes.KubectlGetOptions{
Name: name,
}
}
// withNamespaceOrDefaultForGet returns an option for a get command to use the provided namespace,
// or the default namespace if an empty string is provided.
// For backwards compatibility, we use the default namespace when this method is called explicitly
// with an empty namespace, since some parts of the code rely on kubectl falling back to the
// default namespace when no namespace argument is passed.
func withNamespaceOrDefaultForGet(namespace string) kubernetes.KubectlGetOption {
if namespace == "" {
namespace = "default"
}
return &kubernetes.KubectlGetOptions{
Namespace: namespace,
}
}
func withClusterScope() kubernetes.KubectlGetOption {
return &kubernetes.KubectlGetOptions{
ClusterScoped: ptr.Bool(true),
}
}
// Get performs a kubectl get command.
func (k *Kubectl) Get(ctx context.Context, resourceType, kubeconfig string, obj runtime.Object, opts ...kubernetes.KubectlGetOption) error {
o := &kubernetes.KubectlGetOptions{}
for _, opt := range opts {
opt.ApplyToGet(o)
}
clusterScoped := o.ClusterScoped != nil && *o.ClusterScoped
if o.Name != "" && o.Namespace == "" && !clusterScoped {
return errors.New("if Name is specified, Namespace is required")
}
params := getParams(resourceType, kubeconfig, o)
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("getting %s with kubectl: %v", resourceType, err)
}
if stdOut.Len() == 0 {
return newNotFoundErrorForTypeAndName(resourceType, o.Name)
}
if err = json.Unmarshal(stdOut.Bytes(), obj); err != nil {
return fmt.Errorf("parsing get %s response: %v", resourceType, err)
}
return nil
}
func getParams(resourceType, kubeconfig string, o *kubernetes.KubectlGetOptions) []string {
clusterScoped := o.ClusterScoped != nil && *o.ClusterScoped
params := []string{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", kubeconfig, resourceType}
if o.Namespace != "" {
params = append(params, "--namespace", o.Namespace)
} else if !clusterScoped {
params = append(params, "--all-namespaces")
}
if o.Name != "" {
params = append(params, o.Name)
}
return params
}
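// For instance, a namespaced get for a single object such as
// k.GetObject(ctx, "secret", "vsphere-creds", "eksa-system", kubeconfig, obj)
// (names hypothetical) results in getParams building:
//
//	get --ignore-not-found -o json --kubeconfig <kubeconfig> secret --namespace eksa-system vsphere-creds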
// Create performs a kubectl create command.
func (k *Kubectl) Create(ctx context.Context, kubeconfig string, obj runtime.Object) error {
b, err := yaml.Marshal(obj)
if err != nil {
return errors.Wrap(err, "marshalling object")
}
_, err = k.ExecuteWithStdin(ctx, b, "create", "-f", "-", "--kubeconfig", kubeconfig)
if isKubectlAlreadyExistsError(err) {
return newAlreadyExistsErrorForObj(obj)
}
if err != nil {
return errors.Wrapf(err, "creating %s object with kubectl", obj.GetObjectKind().GroupVersionKind())
}
return nil
}
const alreadyExistsErrorMessageSubString = "AlreadyExists"
func isKubectlAlreadyExistsError(err error) bool {
return err != nil && strings.Contains(err.Error(), alreadyExistsErrorMessageSubString)
}
const notFoundErrorMessageSubString = "NotFound"
func isKubectlNotFoundError(err error) bool {
return err != nil && strings.Contains(err.Error(), notFoundErrorMessageSubString)
}
func newAlreadyExistsErrorForObj(obj runtime.Object) error {
return apierrors.NewAlreadyExists(
groupResourceFromObj(obj),
resourceNameFromObj(obj),
)
}
func groupResourceFromObj(obj runtime.Object) schema.GroupResource {
apiObj, ok := obj.(client.Object)
if !ok {
// If this doesn't implement the client object interface,
// we don't know how to process it. This should never happen for
// any of the known types.
return schema.GroupResource{}
}
k := apiObj.GetObjectKind().GroupVersionKind()
return schema.GroupResource{
Group: k.Group,
Resource: k.Kind,
}
}
func resourceNameFromObj(obj runtime.Object) string {
apiObj, ok := obj.(client.Object)
if !ok {
// If this doesn't implement the client object interface,
// we don't know how to process it. This should never happen for
// any of the known types.
return ""
}
return apiObj.GetName()
}
func newNotFoundErrorForTypeAndName(resourceType, name string) error {
resourceTypeSplit := strings.SplitN(resourceType, ".", 2)
gr := schema.GroupResource{Resource: resourceTypeSplit[0]}
if len(resourceTypeSplit) == 2 {
gr.Group = resourceTypeSplit[1]
}
return apierrors.NewNotFound(gr, name)
}
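// For example, newNotFoundErrorForTypeAndName("clusters.cluster.x-k8s.io", "mgmt")
// splits the resource type on the first "." and returns a NotFound API error for
// schema.GroupResource{Group: "cluster.x-k8s.io", Resource: "clusters"}.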
// Replace performs a kubectl replace command.
func (k *Kubectl) Replace(ctx context.Context, kubeconfig string, obj runtime.Object) error {
// Even if --save-config=false is set (which is the default), kubectl replace will
// not only respect the last-applied annotation if present in the object, but will also update
// it with the provided state of the resource, including the metadata.resourceVersion. This
// breaks future uses of kubectl apply: since the input to those commands never provides the
// resourceVersion, kubectl will send a request trying to remove that field, which is not a
// valid request and gets rejected by the kube API server. To avoid this, we simply remove the
// annotation before passing the object to the replace command.
// Mixing imperative and "declarative" commands on the same resource is not recommended, but
// our CLI makes extensive use of client-side apply. Although not ideal, this mechanism lets us
// perform updates (using replace) where idempotency is necessary while keeping the ability to use apply.
obj = removeLastAppliedAnnotation(obj)
b, err := yaml.Marshal(obj)
if err != nil {
return errors.Wrap(err, "marshalling object")
}
if _, err := k.ExecuteWithStdin(ctx, b, "replace", "-f", "-", "--kubeconfig", kubeconfig); err != nil {
return errors.Wrapf(err, "replacing %s object with kubectl", obj.GetObjectKind().GroupVersionKind())
}
return nil
}
// removeLastAppliedAnnotation deletes the kubectl last-applied annotation
// from the object if present.
func removeLastAppliedAnnotation(obj runtime.Object) runtime.Object {
apiObj, ok := obj.(client.Object)
// If this doesn't implement the client object interface,
// we don't know how to access the annotations.
// All the objects that we pass here do implement client.Client.
if !ok {
return obj
}
annotations := apiObj.GetAnnotations()
delete(annotations, lastAppliedAnnotation)
apiObj.SetAnnotations(annotations)
return apiObj
}
// Delete performs a delete command authenticating with a kubeconfig file.
func (k *Kubectl) Delete(ctx context.Context, resourceType, kubeconfig string, opts ...kubernetes.KubectlDeleteOption) error {
o := &kubernetes.KubectlDeleteOptions{}
for _, opt := range opts {
opt.ApplyToDelete(o)
}
if o.Name != "" && o.Namespace == "" {
return errors.New("if Name is specified, Namespace is required")
}
if o.Name != "" && o.HasLabels != nil {
return errors.New("options for HasLabels and Name are mutually exclusive")
}
params := deleteParams(resourceType, kubeconfig, o)
_, err := k.Execute(ctx, params...)
if isKubectlNotFoundError(err) {
return newNotFoundErrorForTypeAndName(resourceType, o.Name)
}
if err != nil {
return errors.Wrapf(err, "deleting %s", resourceType)
}
return nil
}
func deleteParams(resourceType, kubeconfig string, o *kubernetes.KubectlDeleteOptions) []string {
params := []string{"delete", "--kubeconfig", kubeconfig, resourceType}
if o.Name != "" {
params = append(params, o.Name)
} else if o.HasLabels == nil {
params = append(params, "--all")
}
if o.Namespace != "" {
params = append(params, "--namespace", o.Namespace)
} else {
params = append(params, "--all-namespaces")
}
if len(o.HasLabels) > 0 {
labelConstrains := make([]string, 0, len(o.HasLabels))
for l, v := range o.HasLabels {
labelConstrains = append(labelConstrains, l+"="+v)
}
sort.Strings(labelConstrains)
params = append(params, "--selector", strings.Join(labelConstrains, ","))
}
return params
}
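// For example, a label-based delete with no name and no namespace, such as
// k.Delete(ctx, "machines.cluster.x-k8s.io", kubeconfig,
// &kubernetes.KubectlDeleteOptions{HasLabels: map[string]string{"cluster.x-k8s.io/cluster-name": "mgmt"}})
// (values hypothetical), makes deleteParams build:
//
//	delete --kubeconfig <kubeconfig> machines.cluster.x-k8s.io --all-namespaces --selector cluster.x-k8s.io/cluster-name=mgmt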
func (k *Kubectl) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error {
b, err := yaml.Marshal(obj)
if err != nil {
return fmt.Errorf("marshalling object: %v", err)
}
if _, err := k.ExecuteWithStdin(ctx, b, "apply", "-f", "-", "--kubeconfig", kubeconfig); err != nil {
return fmt.Errorf("applying object with kubectl: %v", err)
}
return nil
}
func (k *Kubectl) GetEksdRelease(ctx context.Context, name, namespace, kubeconfigFile string) (*eksdv1alpha1.Release, error) {
obj := &eksdv1alpha1.Release{}
if err := k.GetObject(ctx, eksdReleaseType, name, namespace, kubeconfigFile, obj); err != nil {
return nil, err
}
return obj, nil
}
func (k *Kubectl) GetDeployment(ctx context.Context, name, namespace, kubeconfig string) (*appsv1.Deployment, error) {
obj := &appsv1.Deployment{}
if err := k.GetObject(ctx, "deployment", name, namespace, kubeconfig, obj); err != nil {
return nil, err
}
return obj, nil
}
func (k *Kubectl) GetDaemonSet(ctx context.Context, name, namespace, kubeconfig string) (*appsv1.DaemonSet, error) {
obj := &appsv1.DaemonSet{}
if err := k.GetObject(ctx, "daemonset", name, namespace, kubeconfig, obj); err != nil {
return nil, err
}
return obj, nil
}
func (k *Kubectl) ExecuteCommand(ctx context.Context, opts ...string) (bytes.Buffer, error) {
return k.Execute(ctx, opts...)
}
// DeleteClusterObject performs a delete call for a cluster-scoped resource, so no namespace is required.
func (k *Kubectl) DeleteClusterObject(ctx context.Context, resourceType, name, kubeconfig string) error {
if _, err := k.Execute(ctx, "delete", resourceType, name, "--kubeconfig", kubeconfig); err != nil {
return fmt.Errorf("deleting %s %s: %v", name, resourceType, err)
}
return nil
}
func (k *Kubectl) ExecuteFromYaml(ctx context.Context, yaml []byte, opts ...string) (bytes.Buffer, error) {
return k.ExecuteWithStdin(ctx, yaml, opts...)
}
func (k *Kubectl) SearchNutanixMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.NutanixMachineConfig, error) {
params := []string{
"get", eksaNutanixMachineResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa NutanixMachineConfigResponse: %v", err)
}
response := &NutanixMachineConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing NutanixMachineConfigResponse response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) SearchNutanixDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.NutanixDatacenterConfig, error) {
params := []string{
"get", eksaNutanixDatacenterResourceType, "-o", "json", "--kubeconfig",
kubeconfigFile, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
stdOut, err := k.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("searching eksa NutanixDatacenterConfigResponse: %v", err)
}
response := &NutanixDatacenterConfigResponse{}
err = json.Unmarshal(stdOut.Bytes(), response)
if err != nil {
return nil, fmt.Errorf("parsing NutanixDatacenterConfigResponse response: %v", err)
}
return response.Items, nil
}
func (k *Kubectl) GetEksaNutanixDatacenterConfig(ctx context.Context, nutanixDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.NutanixDatacenterConfig, error) {
response := &v1alpha1.NutanixDatacenterConfig{}
err := k.GetObject(ctx, eksaNutanixDatacenterResourceType, nutanixDatacenterConfigName, namespace, kubeconfigFile, response)
if err != nil {
return nil, fmt.Errorf("getting eksa nutanix datacenterconfig: %v", err)
}
return response, nil
}
func (k *Kubectl) GetEksaNutanixMachineConfig(ctx context.Context, nutanixMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.NutanixMachineConfig, error) {
response := &v1alpha1.NutanixMachineConfig{}
err := k.GetObject(ctx, eksaNutanixMachineResourceType, nutanixMachineConfigName, namespace, kubeconfigFile, response)
if err != nil {
return nil, fmt.Errorf("getting eksa nutanix machineconfig: %v", err)
}
return response, nil
}
func (k *Kubectl) DeleteEksaNutanixDatacenterConfig(ctx context.Context, nutanixDatacenterConfigName string, kubeconfigFile string, namespace string) error {
params := []string{"delete", eksaNutanixDatacenterResourceType, nutanixDatacenterConfigName, "--kubeconfig", kubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting nutanixdatacenterconfig cluster %s apply: %v", nutanixDatacenterConfigName, err)
}
return nil
}
func (k *Kubectl) DeleteEksaNutanixMachineConfig(ctx context.Context, nutanixMachineConfigName string, kubeconfigFile string, namespace string) error {
params := []string{"delete", eksaNutanixMachineResourceType, nutanixMachineConfigName, "--kubeconfig", kubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
_, err := k.Execute(ctx, params...)
if err != nil {
return fmt.Errorf("deleting nutanixmachineconfig cluster %s apply: %v", nutanixMachineConfigName, err)
}
return nil
}
// AllBaseboardManagements returns all the baseboard management resources in the cluster.
func (k *Kubectl) AllBaseboardManagements(ctx context.Context, kubeconfig string) ([]rufiounreleased.BaseboardManagement, error) {
stdOut, err := k.Execute(ctx,
"get", "baseboardmanagements.bmc.tinkerbell.org",
"-o", "json",
"--kubeconfig", kubeconfig,
"--all-namespaces=true",
)
if err != nil {
return nil, err
}
var list rufiounreleased.BaseboardManagementList
if err := json.Unmarshal(stdOut.Bytes(), &list); err != nil {
return nil, err
}
return list.Items, nil
}
// AllTinkerbellHardware returns all the hardware resources in the cluster.
func (k *Kubectl) AllTinkerbellHardware(ctx context.Context, kubeconfig string) ([]tinkv1alpha1.Hardware, error) {
stdOut, err := k.Execute(ctx,
"get", "hardware.tinkerbell.org",
"-o", "json",
"--kubeconfig", kubeconfig,
"--all-namespaces=true",
)
if err != nil {
return nil, err
}
var list tinkv1alpha1.HardwareList
if err := json.Unmarshal(stdOut.Bytes(), &list); err != nil {
return nil, err
}
return list.Items, nil
}
// HasCRD checks if the given CRD exists in the cluster specified by kubeconfig.
func (k *Kubectl) HasCRD(ctx context.Context, crd, kubeconfig string) (bool, error) {
_, err := k.Execute(ctx, "get", "customresourcedefinition", crd, "--kubeconfig", kubeconfig)
if err == nil {
return true, nil
}
if isKubectlNotFoundError(err) {
return false, nil
}
return false, err
}
// DeleteCRD removes the given CRD from the cluster specified in kubeconfig.
func (k *Kubectl) DeleteCRD(ctx context.Context, crd, kubeconfig string) error {
_, err := k.Execute(ctx, "delete", "customresourcedefinition", crd, "--kubeconfig", kubeconfig)
if err != nil && !isKubectlNotFoundError(err) {
return err
}
return nil
}
| 2,504 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"errors"
"testing"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/internal/test"
)
type (
getter func(*kubectlGetterTest) (client.Object, error)
kubectlGetterTest struct {
*kubectlTest
resourceType, name, namespace string
json string
getter getter
want client.Object
}
)
func newKubectlGetterTest(t *testing.T) *kubectlGetterTest {
return &kubectlGetterTest{
kubectlTest: newKubectlTest(t),
name: "name",
namespace: "my-ns",
}
}
func (tt *kubectlGetterTest) withResourceType(r string) *kubectlGetterTest {
tt.resourceType = r
return tt
}
func (tt *kubectlGetterTest) withoutNamespace() *kubectlGetterTest {
tt.namespace = ""
return tt
}
func (tt *kubectlGetterTest) withJson(j string) *kubectlGetterTest {
tt.json = j
return tt
}
func (tt *kubectlGetterTest) withJsonFromFile(file string) *kubectlGetterTest {
return tt.withJson(test.ReadFile(tt.t, file))
}
func (tt *kubectlGetterTest) withGetter(g getter) *kubectlGetterTest {
tt.getter = g
return tt
}
func (tt *kubectlGetterTest) andWant(o client.Object) *kubectlGetterTest {
tt.want = o
return tt
}
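// Illustrative wiring of this builder in a test; the resource type, testdata
// file, getter, and expected object are hypothetical:
//
//	newKubectlGetterTest(t).
//		withResourceType("clusterresourcesets.addons.cluster.x-k8s.io").
//		withJsonFromFile("testdata/kubectl_clusterresourceset.json").
//		withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
//			return tt.k.GetClusterResourceSet(tt.ctx, tt.kubeconfig, tt.name, tt.namespace)
//		}).
//		andWant(wantCRS).
//		testSuccess()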
func (tt *kubectlGetterTest) testSuccess() {
tt.WithT.THelper()
argsWithoutName := []interface{}{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.cluster.KubeconfigFile, tt.resourceType}
if tt.namespace != "" {
argsWithoutName = append(argsWithoutName, "--namespace", tt.namespace)
}
args := append(argsWithoutName, tt.name)
tt.e.EXPECT().Execute(
tt.ctx, args...,
).Return(*bytes.NewBufferString(tt.json), nil)
got, err := tt.getter(tt)
tt.Expect(err).To(Not(HaveOccurred()), "Getter for %s should succeed", tt.resourceType)
tt.Expect(got).To(Equal(tt.want), "Getter for %s should return correct object", tt.resourceType)
}
func (tt *kubectlGetterTest) testError() {
tt.WithT.THelper()
argsWithoutName := []interface{}{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.cluster.KubeconfigFile, tt.resourceType}
if tt.namespace != "" {
argsWithoutName = append(argsWithoutName, "--namespace", tt.namespace)
}
args := append(argsWithoutName, tt.name)
tt.e.EXPECT().Execute(
tt.ctx, args...,
).Return(bytes.Buffer{}, errors.New("error in get"))
_, err := tt.getter(tt)
tt.Expect(err).To(MatchError(ContainSubstring("error in get")), "Getter for %s should fail", tt.resourceType)
}
| 96 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
_ "embed"
"encoding/json"
"errors"
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/version"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
const (
secretObjectType = "addons.cluster.x-k8s.io/resource-set"
secretObjectName = "cpi-vsphere-config"
)
//go:embed testdata/nutanix/machineConfig.yaml
var nutanixMachineConfigSpec string
//go:embed testdata/nutanix/machineConfig.json
var nutanixMachineConfigSpecJSON string
//go:embed testdata/nutanix/datacenterConfig.json
var nutanixDatacenterConfigSpecJSON string
//go:embed testdata/nutanix/machineConfigs.json
var nutanixMachineConfigsJSON string
//go:embed testdata/nutanix/datacenterConfigs.json
var nutanixDatacenterConfigsJSON string
var capiClustersResourceType = fmt.Sprintf("clusters.%s", clusterv1.GroupVersion.Group)
func newKubectl(t *testing.T) (*executables.Kubectl, context.Context, *types.Cluster, *mockexecutables.MockExecutable) {
kubeconfigFile := "c.kubeconfig"
cluster := &types.Cluster{
KubeconfigFile: kubeconfigFile,
Name: "test-cluster",
}
ctx := context.Background()
ctrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(ctrl)
return executables.NewKubectl(executable), ctx, cluster, executable
}
type kubectlTest struct {
t *testing.T
*WithT
k *executables.Kubectl
ctx context.Context
cluster *types.Cluster
e *mockexecutables.MockExecutable
namespace string
kubeconfig string
}
func newKubectlTest(t *testing.T) *kubectlTest {
k, ctx, cluster, e := newKubectl(t)
return &kubectlTest{
t: t,
k: k,
ctx: ctx,
cluster: cluster,
e: e,
WithT: NewWithT(t),
namespace: "namespace",
kubeconfig: cluster.KubeconfigFile,
}
}
func TestKubectlGetCAPIMachines(t *testing.T) {
t.Parallel()
g := NewWithT(t)
k, ctx, cluster, e := newKubectl(t)
machinesResponseBuffer := bytes.Buffer{}
machinesResponseBuffer.WriteString(test.ReadFile(t, "testdata/kubectl_machines_no_conditions.json"))
tests := []struct {
name string
buffer bytes.Buffer
machineLength int
execErr error
expectedErr error
}{
{
name: "GetCAPIMachines_Success",
buffer: machinesResponseBuffer,
machineLength: 2,
execErr: nil,
expectedErr: nil,
},
{
name: "GetCAPIMachines_Success",
buffer: bytes.Buffer{},
machineLength: 0,
execErr: nil,
expectedErr: errors.New("parsing get machines response: unexpected end of JSON input"),
},
{
name: "GetCAPIMachines_Success",
buffer: bytes.Buffer{},
machineLength: 0,
execErr: errors.New("exec error"),
expectedErr: errors.New("getting machines: exec error"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
e.EXPECT().Execute(ctx,
"get", "machines.cluster.x-k8s.io",
"-o", "json",
"--kubeconfig", cluster.KubeconfigFile,
"--selector=cluster.x-k8s.io/cluster-name="+cluster.Name,
"--namespace", constants.EksaSystemNamespace,
).Return(test.buffer, test.execErr)
machines, err := k.GetCAPIMachines(ctx, cluster, cluster.Name)
if test.expectedErr != nil {
g.Expect(err).To(MatchError(test.expectedErr))
} else {
g.Expect(err).To(Not(HaveOccurred()))
}
g.Expect(len(machines)).To(Equal(test.machineLength))
})
}
}
func TestKubectlApplyManifestSuccess(t *testing.T) {
t.Parallel()
spec := "specfile"
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"apply", "-f", spec, "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.ApplyManifest(ctx, cluster.KubeconfigFile, spec); err != nil {
t.Errorf("Kubectl.ApplyManifest() error = %v, want nil", err)
}
}
func TestKubectlApplyManifestError(t *testing.T) {
t.Parallel()
spec := "specfile"
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"apply", "-f", spec, "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.ApplyManifest(ctx, cluster.KubeconfigFile, spec); err == nil {
t.Errorf("Kubectl.ApplyManifest() error = nil, want not nil")
}
}
func TestKubectlApplyKubeSpecFromBytesSuccess(t *testing.T) {
t.Parallel()
var data []byte
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"apply", "-f", "-", "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.ApplyKubeSpecFromBytes(ctx, cluster, data); err != nil {
t.Errorf("Kubectl.ApplyKubeSpecFromBytes() error = %v, want nil", err)
}
}
func TestKubectlApplyKubeSpecFromBytesError(t *testing.T) {
t.Parallel()
var data []byte
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"apply", "-f", "-", "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.ApplyKubeSpecFromBytes(ctx, cluster, data); err == nil {
t.Errorf("Kubectl.ApplyKubeSpecFromBytes() error = nil, want not nil")
}
}
func TestKubectlDeleteManifestSuccess(t *testing.T) {
t.Parallel()
spec := "specfile"
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"delete", "-f", spec, "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.DeleteManifest(ctx, cluster.KubeconfigFile, spec); err != nil {
t.Errorf("Kubectl.DeleteManifest() error = %v, want nil", err)
}
}
func TestKubectlDeleteManifestError(t *testing.T) {
t.Parallel()
spec := "specfile"
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"delete", "-f", spec, "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.DeleteManifest(ctx, cluster.KubeconfigFile, spec); err == nil {
t.Errorf("Kubectl.DeleteManifest() error = nil, want not nil")
}
}
func TestKubectlDeleteKubeSpecFromBytesSuccess(t *testing.T) {
t.Parallel()
var data []byte
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"delete", "-f", "-", "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.DeleteKubeSpecFromBytes(ctx, cluster, data); err != nil {
t.Errorf("Kubectl.DeleteKubeSpecFromBytes() error = %v, want nil", err)
}
}
func TestKubectlDeleteSpecFromBytesError(t *testing.T) {
t.Parallel()
var data []byte
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"delete", "-f", "-", "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.DeleteKubeSpecFromBytes(ctx, cluster, data); err == nil {
t.Errorf("Kubectl.DeleteKubeSpecFromBytes() error = nil, want not nil")
}
}
func TestKubectlApplyKubeSpecFromBytesWithNamespaceSuccess(t *testing.T) {
t.Parallel()
var data []byte = []byte("someData")
var namespace string
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"apply", "-f", "-", "--namespace", namespace, "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, data, namespace); err != nil {
t.Errorf("Kubectl.ApplyKubeSpecFromBytesWithNamespace() error = %v, want nil", err)
}
}
func TestKubectlApplyKubeSpecFromBytesWithNamespaceSuccessWithEmptyInput(t *testing.T) {
t.Parallel()
var data []byte
var namespace string
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Any()).Times(0)
if err := k.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, data, namespace); err != nil {
t.Errorf("Kubectl.ApplyKubeSpecFromBytesWithNamespace() error = %v, want nil", err)
}
}
func TestKubectlApplyKubeSpecFromBytesWithNamespaceError(t *testing.T) {
t.Parallel()
var data []byte = []byte("someData")
var namespace string
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"apply", "-f", "-", "--namespace", namespace, "--kubeconfig", cluster.KubeconfigFile}
e.EXPECT().ExecuteWithStdin(ctx, data, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, data, namespace); err == nil {
t.Errorf("Kubectl.ApplyKubeSpecFromBytes() error = nil, want not nil")
}
}
func TestKubectlCreateNamespaceSuccess(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"create", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.CreateNamespace(ctx, kubeconfig, namespace); err != nil {
t.Errorf("Kubectl.CreateNamespace() error = %v, want nil", err)
}
}
func TestKubectlCreateNamespaceError(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"create", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.CreateNamespace(ctx, kubeconfig, namespace); err == nil {
t.Errorf("Kubectl.CreateNamespace() error = nil, want not nil")
}
}
func TestKubectlCreateNamespaceIfNotPresentSuccessOnNamespacePresent(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParamForGetNamespace := []string{"get", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParamForGetNamespace)).Return(bytes.Buffer{}, nil)
if err := k.CreateNamespaceIfNotPresent(ctx, kubeconfig, namespace); err != nil {
t.Errorf("Kubectl.CreateNamespaceIfNotPresent() error = %v, want nil", err)
}
}
func TestKubectlCreateNamespaceIfNotPresentSuccessOnNamespaceNotPresent(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParamForGetNamespace := []string{"get", "namespace", namespace, "--kubeconfig", kubeconfig}
expectedParamForCreateNamespace := []string{"create", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParamForGetNamespace)).Return(bytes.Buffer{}, errors.New("not found"))
e.EXPECT().Execute(ctx, gomock.Eq(expectedParamForCreateNamespace)).Return(bytes.Buffer{}, nil)
if err := k.CreateNamespaceIfNotPresent(ctx, kubeconfig, namespace); err != nil {
t.Errorf("Kubectl.CreateNamespaceIfNotPresent() error = %v, want nil", err)
}
}
func TestKubectlCreateNamespaceIfNotPresentFailureOnNamespaceCreationFailure(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParamForGetNamespace := []string{"get", "namespace", namespace, "--kubeconfig", kubeconfig}
expectedParamForCreateNamespace := []string{"create", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParamForGetNamespace)).Return(bytes.Buffer{}, errors.New("not found"))
e.EXPECT().Execute(ctx, gomock.Eq(expectedParamForCreateNamespace)).Return(bytes.Buffer{}, errors.New("exception"))
if err := k.CreateNamespaceIfNotPresent(ctx, kubeconfig, namespace); err == nil {
t.Errorf("Kubectl.CreateNamespaceIfNotPresent() error = nil, want not nil")
}
}
func TestKubectlDeleteNamespaceSuccess(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"delete", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.DeleteNamespace(ctx, kubeconfig, namespace); err != nil {
t.Errorf("Kubectl.DeleteNamespace() error = %v, want nil", err)
}
}
func TestKubectlDeleteNamespaceError(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"delete", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.DeleteNamespace(ctx, kubeconfig, namespace); err == nil {
t.Errorf("Kubectl.DeleteNamespace() error = nil, want not nil")
}
}
func TestKubectlDeleteSecretSuccess(t *testing.T) {
t.Parallel()
var secretName, namespace string
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"delete", "secret", secretName, "--kubeconfig", cluster.KubeconfigFile, "--namespace", namespace}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.DeleteSecret(ctx, cluster, secretName, namespace); err != nil {
t.Errorf("Kubectl.DeleteNamespace() error = %v, want nil", err)
}
}
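// Error-path counterpart to TestKubectlDeleteSecretSuccess above. This is a
// minimal sketch that assumes DeleteSecret surfaces the executable error to
// the caller, mirroring the other delete error tests in this file.
func TestKubectlDeleteSecretError(t *testing.T) {
	t.Parallel()
	var secretName, namespace string
	k, ctx, cluster, e := newKubectl(t)
	expectedParam := []string{"delete", "secret", secretName, "--kubeconfig", cluster.KubeconfigFile, "--namespace", namespace}
	e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
	if err := k.DeleteSecret(ctx, cluster, secretName, namespace); err == nil {
		t.Errorf("Kubectl.DeleteSecret() error = nil, want not nil")
	}
}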
func TestKubectlGetNamespaceSuccess(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"get", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.GetNamespace(ctx, kubeconfig, namespace); err != nil {
t.Errorf("Kubectl.GetNamespace() error = %v, want nil", err)
}
}
func TestKubectlGetNamespaceError(t *testing.T) {
t.Parallel()
var kubeconfig, namespace string
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"get", "namespace", namespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.GetNamespace(ctx, kubeconfig, namespace); err == nil {
t.Errorf("Kubectl.GetNamespace() error = nil, want not nil")
}
}
func TestKubectlWaitSuccess(t *testing.T) {
t.Parallel()
var timeout, kubeconfig, forCondition, property, namespace string
k, ctx, _, e := newKubectl(t)
	// Kubectl.Wait does not tolerate blank timeout values and converts the
	// provided timeout to seconds before actually invoking kubectl wait.
timeout = "1m"
expectedTimeout := "60.00s"
expectedParam := []string{"wait", "--timeout", expectedTimeout, "--for=condition=" + forCondition, property, "--kubeconfig", kubeconfig, "-n", namespace}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.Wait(ctx, kubeconfig, timeout, forCondition, property, namespace); err != nil {
t.Errorf("Kubectl.Wait() error = %v, want nil", err)
}
}
func TestKubectlWaitBadTimeout(t *testing.T) {
t.Parallel()
var timeout, kubeconfig, forCondition, property, namespace string
k, ctx, _, _ := newKubectl(t)
timeout = "1y"
if err := k.Wait(ctx, kubeconfig, timeout, forCondition, property, namespace); err == nil {
t.Errorf("Kubectl.Wait() error = nil, want duration parse error")
}
timeout = "-1s"
if err := k.Wait(ctx, kubeconfig, timeout, forCondition, property, namespace); err == nil {
t.Errorf("Kubectl.Wait() error = nil, want duration parse error")
}
}
func TestKubectlWaitRetryPolicy(t *testing.T) {
t.Parallel()
connectionRefusedError := fmt.Errorf("The connection to the server 127.0.0.1:56789 was refused")
ioTimeoutError := fmt.Errorf("Unable to connect to the server 127.0.0.1:56789, i/o timeout\n")
miscellaneousError := fmt.Errorf("Some other random miscellaneous error")
k := executables.NewKubectl(nil)
_, wait := executables.KubectlWaitRetryPolicy(k, 1, connectionRefusedError)
if wait != 10*time.Second {
t.Errorf("kubectlWaitRetryPolicy didn't correctly calculate first retry wait for connection refused")
}
_, wait = executables.KubectlWaitRetryPolicy(k, -1, connectionRefusedError)
if wait != 10*time.Second {
t.Errorf("kubectlWaitRetryPolicy didn't correctly protect for total retries < 0")
}
_, wait = executables.KubectlWaitRetryPolicy(k, 2, connectionRefusedError)
if wait != 15*time.Second {
t.Errorf("kubectlWaitRetryPolicy didn't correctly protect for second retry wait")
}
_, wait = executables.KubectlWaitRetryPolicy(k, 1, ioTimeoutError)
if wait != 10*time.Second {
t.Errorf("kubectlWaitRetryPolicy didn't correctly calculate first retry wait for ioTimeout")
}
retry, _ := executables.KubectlWaitRetryPolicy(k, 1, miscellaneousError)
if retry != false {
t.Errorf("kubectlWaitRetryPolicy didn't not-retry on non-network error")
}
}
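// Complementary sketch to TestKubectlWaitRetryPolicy above: the existing checks
// only inspect the wait durations, so this assumes (based on the wait values
// returned for network errors) that the policy also reports retry=true for a
// connection-refused error.
func TestKubectlWaitRetryPolicyRetriesNetworkErrors(t *testing.T) {
	t.Parallel()
	connectionRefusedError := fmt.Errorf("The connection to the server 127.0.0.1:56789 was refused")
	k := executables.NewKubectl(nil)
	retry, _ := executables.KubectlWaitRetryPolicy(k, 1, connectionRefusedError)
	if !retry {
		t.Errorf("kubectlWaitRetryPolicy didn't retry on connection refused error")
	}
}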
func TestWaitForTimeout(t *testing.T) {
t.Parallel()
k := executables.Kubectl{}
timeoutTime := time.Now()
err := executables.CallKubectlPrivateWait(&k, nil, "", timeoutTime, "myCondition", "myProperty", "")
if err == nil || err.Error() != "error: timed out waiting for condition myCondition on myProperty" {
t.Errorf("kubectl private wait didn't timeout")
}
}
func TestKubectlWaitForService(t *testing.T) {
t.Parallel()
testSvc := &corev1.Service{
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.2",
},
}
respJSON, err := json.Marshal(testSvc)
if err != nil {
t.Errorf("marshaling test service: %s", err)
}
ret := bytes.NewBuffer(respJSON)
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", "kubeconfig", "service", "--namespace", "eksa-packages", "test"}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(*ret, nil).AnyTimes()
if err := k.WaitForService(ctx, "kubeconfig", "5m", "test", "eksa-packages"); err != nil {
t.Errorf("Kubectl.WaitForService() error = %v, want nil", err)
}
}
func TestKubectlWaitForServiceWithLoadBalancer(t *testing.T) {
t.Parallel()
testSvc := &corev1.Service{
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
IP: "192.168.1.1",
},
},
},
},
}
respJSON, err := json.Marshal(testSvc)
if err != nil {
t.Errorf("marshaling test service: %s", err)
}
ret := bytes.NewBuffer(respJSON)
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", "kubeconfig", "service", "--namespace", "eksa-packages", "test"}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(*ret, nil).AnyTimes()
if err := k.WaitForService(ctx, "kubeconfig", "5m", "test", "eksa-packages"); err != nil {
t.Errorf("Kubectl.WaitForService() error = %v, want nil", err)
}
}
func TestKubectlWaitForServiceTimedOut(t *testing.T) {
t.Parallel()
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", "kubeconfig", "service", "--namespace", "eksa-packages", "test"}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil).AnyTimes()
if err := k.WaitForService(ctx, "kubeconfig", "2s", "test", "eksa-packages"); err == nil {
t.Errorf("Kubectl.WaitForService() error = nil, want %v", context.Canceled)
}
}
func TestKubectlWaitForServiceBadTimeout(t *testing.T) {
t.Parallel()
k, ctx, _, _ := newKubectl(t)
if err := k.WaitForService(ctx, "kubeconfig", "abc", "test", "eksa-packages"); err == nil {
t.Errorf("Kubectl.WaitForService() error = nil, want parsing duration error")
}
}
func TestKubectlWaitError(t *testing.T) {
t.Parallel()
var timeout, kubeconfig, forCondition, property, namespace string
k, ctx, _, e := newKubectl(t)
timeout = "1m"
expectedTimeout := "60.00s"
expectedParam := []string{"wait", "--timeout", expectedTimeout, "--for=condition=" + forCondition, property, "--kubeconfig", kubeconfig, "-n", namespace}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.Wait(ctx, kubeconfig, timeout, forCondition, property, namespace); err == nil {
t.Errorf("Kubectl.Wait() error = nil, want not nil")
}
}
func TestKubectlWaitNetworkErrorWithRetries(t *testing.T) {
t.Parallel()
var timeout, kubeconfig, forCondition, property, namespace string
k, ctx, _, e := newKubectl(t)
// Rebuild the kubectl executable with custom wait params so we avoid extremely long tests.
k = executables.NewKubectl(e,
executables.WithKubectlNetworkFaultBaseRetryTime(1*time.Millisecond),
executables.WithNetworkFaultBackoffFactor(1),
)
timeout = "1m"
expectedTimeout := "60.00s"
expectedParam := []string{"wait", "--timeout", expectedTimeout, "--for=condition=" + forCondition, property, "--kubeconfig", kubeconfig, "-n", namespace}
	firstTry := e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("The connection to the server 127.0.0.1:56789 was refused")) //nolint:revive // The format of the message is important here since the policy code depends on it
	// Kubectl.Wait intelligently adjusts the timeout param on retries. This is hard to predict
	// from within the test, so the mock doesn't validate params on the retried calls.
	secondTry := e.EXPECT().Execute(ctx, gomock.Any()).Return(bytes.Buffer{}, errors.New("Unable to connect to the server: 127.0.0.1: 56789, i/o timeout.\n")) //nolint:revive // The format of the message is important here since the policy code depends on it
thirdTry := e.EXPECT().Execute(ctx, gomock.Any()).Return(bytes.Buffer{}, nil)
gomock.InOrder(
firstTry,
secondTry,
thirdTry,
)
if err := k.Wait(ctx, kubeconfig, timeout, forCondition, property, namespace); err != nil {
t.Errorf("Kubectl.Wait() error = %v, want nil", err)
}
}
func TestKubectlSearchCloudStackMachineConfigs(t *testing.T) {
t.Parallel()
var kubeconfig, namespace, name string
buffer := bytes.Buffer{}
buffer.WriteString(test.ReadFile(t, "testdata/kubectl_no_cs_machineconfigs.json"))
k, ctx, _, e := newKubectl(t)
expectedParam := []string{
"get", fmt.Sprintf("cloudstackmachineconfigs.%s", v1alpha1.GroupVersion.Group), "-o", "json", "--kubeconfig",
kubeconfig, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(buffer, nil)
mc, err := k.SearchCloudStackMachineConfig(ctx, name, kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.SearchCloudStackMachineConfig() error = %v, want nil", err)
}
if len(mc) > 0 {
t.Errorf("expected 0 machine configs, got %d", len(mc))
}
}
func TestKubectlSearchCloudStackDatacenterConfigs(t *testing.T) {
t.Parallel()
var kubeconfig, namespace, name string
buffer := bytes.Buffer{}
buffer.WriteString(test.ReadFile(t, "testdata/kubectl_no_cs_datacenterconfigs.json"))
k, ctx, _, e := newKubectl(t)
expectedParam := []string{
"get", fmt.Sprintf("cloudstackdatacenterconfigs.%s", v1alpha1.GroupVersion.Group), "-o", "json", "--kubeconfig",
kubeconfig, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(buffer, nil)
mc, err := k.SearchCloudStackDatacenterConfig(ctx, name, kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.SearchCloudStackDatacenterConfig() error = %v, want nil", err)
}
if len(mc) > 0 {
t.Errorf("expected 0 datacenter configs, got %d", len(mc))
}
}
func TestCloudStackWorkerNodesMachineTemplate(t *testing.T) {
t.Parallel()
var kubeconfig, namespace, clusterName, machineTemplateName string
machineTemplateNameBuffer := bytes.NewBufferString(machineTemplateName)
machineTemplatesBuffer := bytes.NewBufferString(test.ReadFile(t, "testdata/kubectl_no_cs_machineconfigs.json"))
k, ctx, _, e := newKubectl(t)
expectedParam1 := []string{
"get", "machinedeployments.cluster.x-k8s.io", fmt.Sprintf("%s-md-0", clusterName), "-o", "go-template",
"--template", "{{.spec.template.spec.infrastructureRef.name}}", "--kubeconfig", kubeconfig, "--namespace", namespace,
}
expectedParam2 := []string{
"get", "cloudstackmachinetemplates.infrastructure.cluster.x-k8s.io", machineTemplateName, "-o", "go-template", "--template",
"{{.spec.template.spec}}", "-o", "yaml", "--kubeconfig", kubeconfig, "--namespace", namespace,
}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam1)).Return(*machineTemplateNameBuffer, nil)
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam2)).Return(*machineTemplatesBuffer, nil)
_, err := k.CloudstackWorkerNodesMachineTemplate(ctx, clusterName, kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.GetNamespace() error = %v, want nil", err)
}
}
func TestVsphereWorkerNodesMachineTemplate(t *testing.T) {
t.Parallel()
var kubeconfig, namespace, clusterName, machineTemplateName string
machineTemplateNameBuffer := bytes.NewBufferString(machineTemplateName)
machineTemplatesBuffer := bytes.NewBufferString(test.ReadFile(t, "testdata/kubectl_no_cs_machineconfigs.json"))
k, ctx, _, e := newKubectl(t)
expectedParam1 := []string{
"get", "machinedeployments.cluster.x-k8s.io", fmt.Sprintf("%s-md-0", clusterName), "-o", "go-template",
"--template", "{{.spec.template.spec.infrastructureRef.name}}", "--kubeconfig", kubeconfig, "--namespace", namespace,
}
expectedParam2 := []string{
"get", "vspheremachinetemplates.infrastructure.cluster.x-k8s.io", machineTemplateName, "-o", "go-template", "--template",
"{{.spec.template.spec}}", "-o", "yaml", "--kubeconfig", kubeconfig, "--namespace", namespace,
}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam1)).Return(*machineTemplateNameBuffer, nil)
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam2)).Return(*machineTemplatesBuffer, nil)
_, err := k.VsphereWorkerNodesMachineTemplate(ctx, clusterName, kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.GetNamespace() error = %v, want nil", err)
}
}
func TestKubectlSaveLogSuccess(t *testing.T) {
t.Parallel()
filename := "testfile"
_, writer := test.NewWriter(t)
deployment := &types.Deployment{
Namespace: "namespace",
Name: "testname",
Container: "container",
}
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"--kubeconfig", cluster.KubeconfigFile, "logs", fmt.Sprintf("deployment/%s", deployment.Name), "-n", deployment.Namespace, "-c", deployment.Container}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.SaveLog(ctx, cluster, deployment, filename, writer); err != nil {
t.Errorf("Kubectl.SaveLog() error = %v, want nil", err)
}
}
func TestKubectlSaveLogError(t *testing.T) {
t.Parallel()
filename := "testfile"
_, writer := test.NewWriter(t)
deployment := &types.Deployment{
Namespace: "namespace",
Name: "testname",
Container: "container",
}
k, ctx, cluster, e := newKubectl(t)
expectedParam := []string{"--kubeconfig", cluster.KubeconfigFile, "logs", fmt.Sprintf("deployment/%s", deployment.Name), "-n", deployment.Namespace, "-c", deployment.Container}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.SaveLog(ctx, cluster, deployment, filename, writer); err == nil {
t.Errorf("Kubectl.SaveLog() error = nil, want not nil")
}
}
func TestKubectlDeleteClusterSuccess(t *testing.T) {
t.Parallel()
kubeconfigFile := "c.kubeconfig"
managementCluster := &types.Cluster{
KubeconfigFile: kubeconfigFile,
}
clusterToDelete := &types.Cluster{
KubeconfigFile: kubeconfigFile,
}
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"delete", capiClustersResourceType, clusterToDelete.Name, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
if err := k.DeleteCluster(ctx, managementCluster, clusterToDelete); err != nil {
t.Errorf("Kubectl.DeleteCluster() error = %v, want nil", err)
}
}
func TestKubectlDeleteClusterError(t *testing.T) {
t.Parallel()
kubeconfigFile := "c.kubeconfig"
managementCluster := &types.Cluster{
KubeconfigFile: kubeconfigFile,
}
clusterToDelete := &types.Cluster{
KubeconfigFile: kubeconfigFile,
}
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"delete", capiClustersResourceType, clusterToDelete.Name, "--kubeconfig", managementCluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, errors.New("error from execute"))
if err := k.DeleteCluster(ctx, managementCluster, clusterToDelete); err == nil {
t.Errorf("Kubectl.DeleteCluster() error = nil, want not nil")
}
}
func TestKubectlGetMachines(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantMachines []types.Machine
}{
{
testName: "no machines",
jsonResponseFile: "testdata/kubectl_no_machines.json",
wantMachines: []types.Machine{},
},
{
testName: "machines with no node ref",
jsonResponseFile: "testdata/kubectl_machines_no_node_ref_no_labels.json",
wantMachines: []types.Machine{
{
Metadata: types.MachineMetadata{
Name: "eksa-test-capd-control-plane-5nfdg",
},
Status: types.MachineStatus{
Conditions: types.Conditions{
{
Status: "True",
Type: "Ready",
},
{
Status: "True",
Type: "APIServerPodHealthy",
},
{
Status: "True",
Type: "BootstrapReady",
},
{
Status: "True",
Type: "ControllerManagerPodHealthy",
},
{
Status: "True",
Type: "EtcdMemberHealthy",
},
{
Status: "True",
Type: "EtcdPodHealthy",
},
{
Status: "True",
Type: "InfrastructureReady",
},
{
Status: "True",
Type: "NodeHealthy",
},
{
Status: "True",
Type: "SchedulerPodHealthy",
},
},
},
},
{
Metadata: types.MachineMetadata{
Name: "eksa-test-capd-md-0-bb7885f6f-gkb85",
},
Status: types.MachineStatus{
Conditions: types.Conditions{
{
Status: "True",
Type: "Ready",
},
{
Status: "True",
Type: "BootstrapReady",
},
{
Status: "True",
Type: "InfrastructureReady",
},
{
Status: "True",
Type: "NodeHealthy",
},
},
},
},
},
},
{
testName: "machines with no conditions",
jsonResponseFile: "testdata/kubectl_machines_no_conditions.json",
wantMachines: []types.Machine{
{
Metadata: types.MachineMetadata{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "eksa-test-capd",
"cluster.x-k8s.io/control-plane": "",
},
Name: "eksa-test-capd-control-plane-5nfdg",
},
Status: types.MachineStatus{
NodeRef: &types.ResourceRef{
APIVersion: "v1",
Kind: "Node",
Name: "eksa-test-capd-control-plane-5nfdg",
},
},
},
{
Metadata: types.MachineMetadata{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "eksa-test-capd",
"cluster.x-k8s.io/deployment-name": "eksa-test-capd-md-0",
"machine-template-hash": "663441929",
},
Name: "eksa-test-capd-md-0-bb7885f6f-gkb85",
},
Status: types.MachineStatus{
NodeRef: &types.ResourceRef{
APIVersion: "v1",
Kind: "Node",
Name: "eksa-test-capd-md-0-bb7885f6f-gkb85",
},
},
},
},
},
{
testName: "machines with node ref",
jsonResponseFile: "testdata/kubectl_machines_with_node_ref.json",
wantMachines: []types.Machine{
{
Metadata: types.MachineMetadata{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "eksa-test-capd",
"cluster.x-k8s.io/control-plane": "",
},
Name: "eksa-test-capd-control-plane-5nfdg",
},
Status: types.MachineStatus{
NodeRef: &types.ResourceRef{
APIVersion: "v1",
Kind: "Node",
Name: "eksa-test-capd-control-plane-5nfdg",
},
Conditions: types.Conditions{
{
Status: "True",
Type: "Ready",
},
{
Status: "True",
Type: "APIServerPodHealthy",
},
{
Status: "True",
Type: "BootstrapReady",
},
{
Status: "True",
Type: "ControllerManagerPodHealthy",
},
{
Status: "True",
Type: "EtcdMemberHealthy",
},
{
Status: "True",
Type: "EtcdPodHealthy",
},
{
Status: "True",
Type: "InfrastructureReady",
},
{
Status: "True",
Type: "NodeHealthy",
},
{
Status: "True",
Type: "SchedulerPodHealthy",
},
},
},
},
{
Metadata: types.MachineMetadata{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "eksa-test-capd",
"cluster.x-k8s.io/deployment-name": "eksa-test-capd-md-0",
"machine-template-hash": "663441929",
},
Name: "eksa-test-capd-md-0-bb7885f6f-gkb85",
},
Status: types.MachineStatus{
NodeRef: &types.ResourceRef{
APIVersion: "v1",
Kind: "Node",
Name: "eksa-test-capd-md-0-bb7885f6f-gkb85",
},
Conditions: types.Conditions{
{
Status: "True",
Type: "Ready",
},
{
Status: "True",
Type: "BootstrapReady",
},
{
Status: "True",
Type: "InfrastructureReady",
},
{
Status: "True",
Type: "NodeHealthy",
},
},
},
},
},
},
{
testName: "etcd machines",
jsonResponseFile: "testdata/kubectl_etcd_machines_no_node_ref.json",
wantMachines: []types.Machine{
{
Metadata: types.MachineMetadata{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "eksa-test-capd",
"cluster.x-k8s.io/etcd-cluster": "",
},
Name: "eksa-test-capd-control-plane-5nfdg",
},
Status: types.MachineStatus{
Conditions: types.Conditions{
{
Status: "True",
Type: "Ready",
},
{
Status: "True",
Type: "APIServerPodHealthy",
},
{
Status: "True",
Type: "BootstrapReady",
},
{
Status: "True",
Type: "ControllerManagerPodHealthy",
},
{
Status: "True",
Type: "EtcdMemberHealthy",
},
{
Status: "True",
Type: "EtcdPodHealthy",
},
{
Status: "True",
Type: "InfrastructureReady",
},
{
Status: "True",
Type: "NodeHealthy",
},
{
Status: "True",
Type: "SchedulerPodHealthy",
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{
"get", "machines.cluster.x-k8s.io", "-o", "json", "--kubeconfig", cluster.KubeconfigFile,
"--selector=cluster.x-k8s.io/cluster-name=" + cluster.Name,
"--namespace", constants.EksaSystemNamespace,
}).Return(*bytes.NewBufferString(fileContent), nil)
gotMachines, err := k.GetMachines(ctx, cluster, cluster.Name)
if err != nil {
t.Fatalf("Kubectl.GetMachines() error = %v, want nil", err)
}
if !reflect.DeepEqual(gotMachines, tt.wantMachines) {
t.Fatalf("Kubectl.GetMachines() machines = %+v, want %+v", gotMachines, tt.wantMachines)
}
})
}
}
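// Error-path sketch for GetMachines, mirroring the table-driven success cases
// above; it assumes the kubectl error is propagated to the caller unchanged.
func TestKubectlGetMachinesError(t *testing.T) {
	t.Parallel()
	k, ctx, cluster, e := newKubectl(t)
	e.EXPECT().Execute(ctx, []string{
		"get", "machines.cluster.x-k8s.io", "-o", "json", "--kubeconfig", cluster.KubeconfigFile,
		"--selector=cluster.x-k8s.io/cluster-name=" + cluster.Name,
		"--namespace", constants.EksaSystemNamespace,
	}).Return(bytes.Buffer{}, errors.New("error from execute"))
	if _, err := k.GetMachines(ctx, cluster, cluster.Name); err == nil {
		t.Fatal("Kubectl.GetMachines() error = nil, want not nil")
	}
}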
func TestKubectlGetEksaCloudStackMachineConfig(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantMachines *v1alpha1.CloudStackMachineConfig
}{
{
testName: "no machines",
jsonResponseFile: "testdata/kubectl_no_cs_machineconfigs.json",
wantMachines: &v1alpha1.CloudStackMachineConfig{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
},
},
{
testName: "one machineconfig",
jsonResponseFile: "testdata/kubectl_eksa_cs_machineconfig.json",
wantMachines: &v1alpha1.CloudStackMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: "CloudStackMachineConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{Name: "test-etcd"},
Spec: v1alpha1.CloudStackMachineConfigSpec{
Template: v1alpha1.CloudStackResourceIdentifier{
Name: "testTemplate",
},
ComputeOffering: v1alpha1.CloudStackResourceIdentifier{
Name: "testOffering",
},
DiskOffering: &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "testOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []v1alpha1.UserConfiguration{
{
Name: "maxdrib",
SshAuthorizedKeys: []string{"ssh-rsa test123 hi"},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
k, ctx, cluster, e := newKubectl(t)
machineConfigName := "testMachineConfig"
e.EXPECT().Execute(ctx, []string{
"get", "--ignore-not-found",
"-o", "json", "--kubeconfig", cluster.KubeconfigFile,
"cloudstackmachineconfigs.anywhere.eks.amazonaws.com",
"--namespace", constants.EksaSystemNamespace,
machineConfigName,
}).Return(*bytes.NewBufferString(fileContent), nil)
gotMachines, err := k.GetEksaCloudStackMachineConfig(ctx, machineConfigName, cluster.KubeconfigFile, constants.EksaSystemNamespace)
if err != nil {
t.Fatalf("Kubectl.GetEksaCloudStackMachineConfig() error = %v, want nil", err)
}
if !reflect.DeepEqual(gotMachines, tt.wantMachines) {
t.Fatalf("Kubectl.GetEksaCloudStackMachineConfig() machines = %+v, want %+v", gotMachines, tt.wantMachines)
}
})
}
}
func TestKubectlGetEksaCloudStackDatacenterConfig(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantDatacenter *v1alpha1.CloudStackDatacenterConfig
}{
{
testName: "no datacenter",
jsonResponseFile: "testdata/kubectl_no_cs_datacenterconfigs.json",
wantDatacenter: &v1alpha1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
},
},
{
testName: "one datacenter availability zones",
jsonResponseFile: "testdata/kubectl_eksa_cs_datacenterconfig_az.json",
wantDatacenter: &v1alpha1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: "CloudStackDatacenterConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: v1alpha1.CloudStackDatacenterConfigSpec{
AvailabilityZones: []v1alpha1.CloudStackAvailabilityZone{{
Name: "default-az-0",
Zone: v1alpha1.CloudStackZone{
Name: "testZone",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "testNetwork",
},
},
CredentialsRef: "global",
Domain: "testDomain",
Account: "testAccount",
}},
},
},
},
{
testName: "one datacenter legacy zones",
jsonResponseFile: "testdata/kubectl_eksa_cs_datacenterconfig.json",
wantDatacenter: &v1alpha1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: "CloudStackDatacenterConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: v1alpha1.CloudStackDatacenterConfigSpec{
Domain: "testDomain",
Account: "testAccount",
Zones: []v1alpha1.CloudStackZone{
{
Name: "testZone",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "testNetwork",
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
k, ctx, cluster, e := newKubectl(t)
datacenterConfigName := "testDatacenterConfig"
e.EXPECT().Execute(ctx, []string{
"get", "--ignore-not-found",
"-o", "json", "--kubeconfig", cluster.KubeconfigFile,
"cloudstackdatacenterconfigs.anywhere.eks.amazonaws.com",
"--namespace", constants.EksaSystemNamespace,
datacenterConfigName,
}).Return(*bytes.NewBufferString(fileContent), nil)
gotDatacenter, err := k.GetEksaCloudStackDatacenterConfig(ctx, datacenterConfigName, cluster.KubeconfigFile, constants.EksaSystemNamespace)
if err != nil {
t.Fatalf("Kubectl.GetEksaCloudStackDatacenterConfig() error = %v, want nil", err)
}
if !gotDatacenter.Spec.Equal(&tt.wantDatacenter.Spec) {
t.Fatalf("Kubectl.GetEksaCloudStackDatacenterConfig() machines = %+v, want %+v", gotDatacenter, tt.wantDatacenter)
}
})
}
}
func TestKubectlLoadSecret(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
params []string
wantErr error
}{
{
testName: "SuccessScenario",
params: []string{"create", "secret", "generic", secretObjectName, "--type", secretObjectType, "--from-literal", "test_cluster", "--kubeconfig", "test_cluster.kind.kubeconfig", "--namespace", constants.EksaSystemNamespace},
wantErr: nil,
},
{
testName: "ErrorScenario",
params: []string{"create", "secret", "generic", secretObjectName, "--type", secretObjectType, "--from-literal", "test_cluster", "--kubeconfig", "test_cluster.kind.kubeconfig", "--namespace", constants.EksaSystemNamespace},
wantErr: errors.New("error loading secret: "),
},
}
for _, tc := range tests {
t.Run(tc.testName, func(tt *testing.T) {
k, ctx, _, e := newKubectl(t)
e.EXPECT().Execute(ctx, tc.params).Return(bytes.Buffer{}, tc.wantErr)
err := k.LoadSecret(ctx, "test_cluster", secretObjectType, secretObjectName, "test_cluster.kind.kubeconfig")
			if (tc.wantErr != nil) != (err != nil) {
				t.Errorf("%v got = %v, want %v", tc.testName, err, tc.wantErr)
			}
})
}
}
func TestKubectlGetSecretFromNamespaceSuccess(t *testing.T) {
t.Parallel()
newKubectlGetterTest(t).withResourceType(
"secret",
).withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
return tt.k.GetSecretFromNamespace(tt.ctx, tt.kubeconfig, tt.name, tt.namespace)
}).withJsonFromFile(
"testdata/kubectl_secret.json",
).andWant(
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "vsphere-cloud-controller-manager",
Namespace: "eksa-system",
},
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
Data: map[string][]byte{
"data": []byte(`apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
`),
},
Type: corev1.SecretType("addons.cluster.x-k8s.io/resource-set"),
},
).testSuccess()
}
func TestKubectlGetSecretFromNamespaceError(t *testing.T) {
t.Parallel()
newKubectlGetterTest(t).withResourceType(
"secret",
).withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
return tt.k.GetSecretFromNamespace(tt.ctx, tt.kubeconfig, tt.name, tt.namespace)
}).testError()
}
func TestKubectlGetSecret(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
responseFile string
wantSecret *corev1.Secret
params []string
wantErr error
}{
{
testName: "SuccessScenario",
responseFile: "testdata/kubectl_secret.json",
wantSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "vsphere-cloud-controller-manager",
Namespace: "eksa-system",
},
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
Data: map[string][]byte{
"data": []byte(`apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
`),
},
Type: corev1.SecretType("addons.cluster.x-k8s.io/resource-set"),
},
params: []string{"get", "secret", secretObjectName, "-o", "json", "--namespace", constants.EksaSystemNamespace, "--kubeconfig", "c.kubeconfig"},
wantErr: nil,
},
{
testName: "ErrorScenario",
responseFile: "testdata/kubectl_secret.json",
wantSecret: nil,
params: []string{"get", "secret", secretObjectName, "-o", "json", "--namespace", constants.EksaSystemNamespace, "--kubeconfig", "c.kubeconfig"},
wantErr: errors.New("error from kubectl client"),
},
}
for _, tc := range tests {
t.Run(tc.testName, func(tt *testing.T) {
response := test.ReadFile(t, tc.responseFile)
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, tc.params).Return(*bytes.NewBufferString(response), tc.wantErr)
secret, err := k.GetSecret(ctx, secretObjectName, executables.WithNamespace(constants.EksaSystemNamespace), executables.WithCluster(cluster))
g := NewWithT(t)
if tc.wantErr != nil {
g.Expect(err.Error()).To(HaveSuffix(tc.wantErr.Error()))
} else {
g.Expect(err).To(BeNil())
g.Expect(secret).To(Equal(tc.wantSecret))
}
})
}
}
func TestKubectlGetClusters(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantClusters []types.CAPICluster
}{
{
testName: "no clusters",
jsonResponseFile: "testdata/kubectl_no_clusters.json",
wantClusters: []types.CAPICluster{},
},
{
testName: "machines with node ref",
jsonResponseFile: "testdata/kubectl_clusters_one.json",
wantClusters: []types.CAPICluster{
{
Metadata: types.Metadata{
Name: "eksa-test-capd",
},
Status: types.ClusterStatus{
Phase: "Provisioned",
Conditions: []types.Condition{
{Type: "Ready", Status: "True"},
{Type: "ControlPlaneReady", Status: "True"},
{Type: "InfrastructureReady", Status: "True"},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{"get", "clusters.cluster.x-k8s.io", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}).Return(*bytes.NewBufferString(fileContent), nil)
gotClusters, err := k.GetClusters(ctx, cluster)
if err != nil {
t.Fatalf("Kubectl.GetClusters() error = %v, want nil", err)
}
if !reflect.DeepEqual(gotClusters, tt.wantClusters) {
t.Fatalf("Kubectl.GetClusters() clusters = %+v, want %+v", gotClusters, tt.wantClusters)
}
})
}
}
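// Error-path sketch for GetClusters, assuming the kubectl error is surfaced
// rather than swallowed.
func TestKubectlGetClustersError(t *testing.T) {
	t.Parallel()
	k, ctx, cluster, e := newKubectl(t)
	e.EXPECT().Execute(ctx, []string{"get", "clusters.cluster.x-k8s.io", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace}).Return(bytes.Buffer{}, errors.New("error from execute"))
	if _, err := k.GetClusters(ctx, cluster); err == nil {
		t.Fatal("Kubectl.GetClusters() error = nil, want not nil")
	}
}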
func TestKubectlGetEKSAClusters(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
expectedSpec v1alpha1.ClusterSpec
clusterName string
}{
{
testName: "EKS-A cluster found",
jsonResponseFile: "testdata/kubectl_eksa_cluster.json",
expectedSpec: v1alpha1.ClusterSpec{
KubernetesVersion: "1.19",
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{Count: 3},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3)}},
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "test-cluster",
},
},
clusterName: "test-cluster",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
fileContent := test.ReadFile(t, tt.jsonResponseFile)
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{"get", "clusters.anywhere.eks.amazonaws.com", "-A", "-o", "jsonpath={.items[0]}", "--kubeconfig", cluster.KubeconfigFile, "--field-selector=metadata.name=" + tt.clusterName}).Return(*bytes.NewBufferString(fileContent), nil)
gotCluster, err := k.GetEksaCluster(ctx, cluster, tt.clusterName)
if err != nil {
t.Fatalf("Kubectl.GetEKSAClusters() error = %v, want nil", err)
}
if !reflect.DeepEqual(gotCluster.Spec, tt.expectedSpec) {
t.Fatalf("Kubectl.GetEKSAClusters() clusters = %+v, want %+v", gotCluster.Spec, tt.expectedSpec)
}
})
}
}
func TestKubectlGetGetApiServerUrlSuccess(t *testing.T) {
t.Parallel()
wantUrl := "https://127.0.0.1:37479"
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(
ctx,
[]string{"config", "view", "--kubeconfig", cluster.KubeconfigFile, "--minify", "--raw", "-o", "jsonpath={.clusters[0].cluster.server}"},
).Return(*bytes.NewBufferString(wantUrl), nil)
gotUrl, err := k.GetApiServerUrl(ctx, cluster)
if err != nil {
t.Fatalf("Kubectl.GetApiServerUrl() error = %v, want nil", err)
}
if gotUrl != wantUrl {
t.Fatalf("Kubectl.GetApiServerUrl() url = %s, want %s", gotUrl, wantUrl)
}
}
func TestKubectlSetControllerEnvVarSuccess(t *testing.T) {
t.Parallel()
envVar := "TEST_VAR"
envVarValue := "TEST_VALUE"
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(
ctx,
[]string{
"set", "env", "deployment/eksa-controller-manager", fmt.Sprintf("%s=%s", envVar, envVarValue),
"--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace,
},
).Return(bytes.Buffer{}, nil)
err := k.SetEksaControllerEnvVar(ctx, envVar, envVarValue, cluster.KubeconfigFile)
if err != nil {
t.Fatalf("Kubectl.RolloutRestartDaemonSet() error = %v, want nil", err)
}
}
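// Error-path counterpart to the success test above; a minimal sketch assuming
// SetEksaControllerEnvVar returns the executable error unchanged.
func TestKubectlSetControllerEnvVarError(t *testing.T) {
	t.Parallel()
	envVar := "TEST_VAR"
	envVarValue := "TEST_VALUE"
	k, ctx, cluster, e := newKubectl(t)
	e.EXPECT().Execute(
		ctx,
		[]string{
			"set", "env", "deployment/eksa-controller-manager", fmt.Sprintf("%s=%s", envVar, envVarValue),
			"--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace,
		},
	).Return(bytes.Buffer{}, errors.New("error from execute"))
	if err := k.SetEksaControllerEnvVar(ctx, envVar, envVarValue, cluster.KubeconfigFile); err == nil {
		t.Fatalf("Kubectl.SetEksaControllerEnvVar() error = nil, want not nil")
	}
}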
func TestKubectlRolloutRestartDaemonSetSuccess(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(
ctx,
[]string{
"rollout", "restart", "daemonset", "cilium",
"--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.KubeSystemNamespace,
},
).Return(bytes.Buffer{}, nil)
err := k.RolloutRestartDaemonSet(ctx, "cilium", constants.KubeSystemNamespace, cluster.KubeconfigFile)
if err != nil {
t.Fatalf("Kubectl.RolloutRestartDaemonSet() error = %v, want nil", err)
}
}
func TestKubectlRolloutRestartDaemonSetError(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(
ctx,
[]string{
"rollout", "restart", "daemonset", "cilium",
"--kubeconfig", cluster.KubeconfigFile, "--namespace", constants.KubeSystemNamespace,
},
).Return(bytes.Buffer{}, fmt.Errorf("error"))
err := k.RolloutRestartDaemonSet(ctx, "cilium", constants.KubeSystemNamespace, cluster.KubeconfigFile)
if err == nil {
t.Fatalf("Kubectl.RolloutRestartDaemonSet() expected error, but was nil")
}
}
func TestKubectlGetGetApiServerUrlError(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(
ctx,
[]string{"config", "view", "--kubeconfig", cluster.KubeconfigFile, "--minify", "--raw", "-o", "jsonpath={.clusters[0].cluster.server}"},
).Return(bytes.Buffer{}, errors.New("error in command"))
_, err := k.GetApiServerUrl(ctx, cluster)
if err == nil {
t.Fatal("Kubectl.GetApiServerUrl() error = nil, want not nil")
}
}
func TestKubectlGetPodsWithAllNamespaces(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantPodNames []string
}{
{
testName: "no pods",
jsonResponseFile: "testdata/kubectl_no_pods.json",
wantPodNames: []string{},
},
{
testName: "multiple pods",
jsonResponseFile: "testdata/kubectl_pods.json",
wantPodNames: []string{
"coredns-74ff55c5b-cnbwh",
"coredns-74ff55c5b-zlbph",
"etcd-lol-control-plane",
"kindnet-xzddb",
"kube-apiserver-lol-control-plane",
"kube-controller-manager-lol-control-plane",
"kube-proxy-27v6c",
"kube-scheduler-lol-control-plane",
"local-path-provisioner-78776bfc44-s9ggt",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
e.EXPECT().Execute(ctx, []string{"get", "pods", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "-A"}).Return(*bytes.NewBufferString(fileContent), nil)
gotPods, err := k.GetPods(ctx, executables.WithCluster(cluster), executables.WithAllNamespaces())
if err != nil {
t.Fatalf("Kubectl.GetPods() error = %v, want nil", err)
}
gotNames := make([]string, 0, len(gotPods))
for _, p := range gotPods {
gotNames = append(gotNames, p.Name)
}
if !reflect.DeepEqual(gotNames, tt.wantPodNames) {
t.Fatalf("Kubectl.GetPods() pod names = %+v, want %+v", gotNames, tt.wantPodNames)
}
})
}
}
func TestKubectlGetPodsWithNamespace(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantPodNames []string
namespace string
}{
{
testName: "no pods",
jsonResponseFile: "testdata/kubectl_no_pods.json",
wantPodNames: []string{},
namespace: "kube-system",
},
{
testName: "multiple pods",
jsonResponseFile: "testdata/kubectl_pods.json",
wantPodNames: []string{
"coredns-74ff55c5b-cnbwh",
"coredns-74ff55c5b-zlbph",
"etcd-lol-control-plane",
"kindnet-xzddb",
"kube-apiserver-lol-control-plane",
"kube-controller-manager-lol-control-plane",
"kube-proxy-27v6c",
"kube-scheduler-lol-control-plane",
"local-path-provisioner-78776bfc44-s9ggt",
},
namespace: "kube-system",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
e.EXPECT().Execute(ctx, []string{"get", "pods", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", tt.namespace}).Return(*bytes.NewBufferString(fileContent), nil)
gotPods, err := k.GetPods(ctx, executables.WithCluster(cluster), executables.WithNamespace(tt.namespace))
if err != nil {
t.Fatalf("Kubectl.GetPods() error = %v, want nil", err)
}
gotNames := make([]string, 0, len(gotPods))
for _, p := range gotPods {
gotNames = append(gotNames, p.Name)
}
if !reflect.DeepEqual(gotNames, tt.wantPodNames) {
t.Fatalf("Kubectl.GetPods() pod names = %+v, want %+v", gotNames, tt.wantPodNames)
}
})
}
}
func TestKubectlGetPodsWithServerSkipTLSAndToken(t *testing.T) {
t.Parallel()
k, ctx, _, e := newKubectl(t)
server := "https://127.0.0.1:37479"
token := "token"
fileContent := test.ReadFile(t, "testdata/kubectl_no_pods.json")
e.EXPECT().Execute(
ctx, []string{"get", "pods", "-o", "json", "--server", server, "--token", token, "--insecure-skip-tls-verify=true", "-A"},
).Return(*bytes.NewBufferString(fileContent), nil)
gotPods, err := k.GetPods(ctx,
executables.WithServer(server), executables.WithToken(token), executables.WithSkipTLSVerify(), executables.WithAllNamespaces(),
)
if err != nil {
t.Fatalf("Kubectl.GetPods() error = %v, want nil", err)
}
if len(gotPods) != 0 {
t.Fatalf("Kubectl.GetPods() num pod = %d, want 0", len(gotPods))
}
}
func TestKubectlGetDeploymentsWithAllNamespaces(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantDeploymentNames []string
}{
{
testName: "no deployments",
jsonResponseFile: "testdata/kubectl_no_deployments.json",
wantDeploymentNames: []string{},
},
{
testName: "multiple deployments",
jsonResponseFile: "testdata/kubectl_deployments.json",
wantDeploymentNames: []string{
"coredns",
"local-path-provisioner",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
e.EXPECT().Execute(ctx, []string{"get", "deployments", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "-A"}).Return(*bytes.NewBufferString(fileContent), nil)
gotDeployments, err := k.GetDeployments(ctx, executables.WithCluster(cluster), executables.WithAllNamespaces())
if err != nil {
t.Fatalf("Kubectl.GetDeployments() error = %v, want nil", err)
}
gotNames := make([]string, 0, len(gotDeployments))
for _, p := range gotDeployments {
gotNames = append(gotNames, p.Name)
}
if !reflect.DeepEqual(gotNames, tt.wantDeploymentNames) {
t.Fatalf("Kubectl.GetDeployments() deployments = %+v, want %+v", gotNames, tt.wantDeploymentNames)
}
})
}
}
func TestKubectlGetDeploymentsWithServerSkipTLSAndToken(t *testing.T) {
t.Parallel()
server := "https://127.0.0.1:37479"
token := "token"
k, ctx, _, e := newKubectl(t)
fileContent := test.ReadFile(t, "testdata/kubectl_no_deployments.json")
e.EXPECT().Execute(
ctx,
[]string{"get", "deployments", "-o", "json", "--server", server, "--token", token, "--insecure-skip-tls-verify=true", "-A"}).Return(*bytes.NewBufferString(fileContent), nil)
gotDeployments, err := k.GetDeployments(
ctx,
executables.WithServer(server), executables.WithToken(token), executables.WithSkipTLSVerify(), executables.WithAllNamespaces(),
)
if err != nil {
t.Fatalf("Kubectl.GetDeployments() error = %v, want nil", err)
}
if len(gotDeployments) != 0 {
t.Fatalf("Kubectl.GetDeployments() num deployments = %d, want 0", len(gotDeployments))
}
}
func TestKubectlGetDeploymentsWithNamespace(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantDeploymentNames []string
deploymentNamespace string
}{
{
testName: "no deployments",
jsonResponseFile: "testdata/kubectl_no_deployments.json",
wantDeploymentNames: []string{},
deploymentNamespace: "kube-system",
},
{
testName: "multiple deployments",
jsonResponseFile: "testdata/kubectl_deployments.json",
wantDeploymentNames: []string{
"coredns",
"local-path-provisioner",
},
deploymentNamespace: "kube-system",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
e.EXPECT().Execute(ctx, []string{"get", "deployments", "-o", "json", "--kubeconfig", cluster.KubeconfigFile, "--namespace", tt.deploymentNamespace}).Return(*bytes.NewBufferString(fileContent), nil)
gotDeployments, err := k.GetDeployments(ctx, executables.WithCluster(cluster), executables.WithNamespace(tt.deploymentNamespace))
if err != nil {
t.Fatalf("Kubectl.GetDeployments() error = %v, want nil", err)
}
gotNames := make([]string, 0, len(gotDeployments))
for _, p := range gotDeployments {
gotNames = append(gotNames, p.Name)
}
if !reflect.DeepEqual(gotNames, tt.wantDeploymentNames) {
t.Fatalf("Kubectl.GetDeployments() deployments = %+v, want %+v", gotNames, tt.wantDeploymentNames)
}
})
}
}
func TestKubectlGetMachineDeployments(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantMachineDeploymentNames []string
}{
{
testName: "no machine deployments",
jsonResponseFile: "testdata/kubectl_no_machine_deployments.json",
wantMachineDeploymentNames: []string{},
},
{
testName: "multiple machine deployments",
jsonResponseFile: "testdata/kubectl_machine_deployments.json",
wantMachineDeploymentNames: []string{
"test0-md-0",
"test1-md-0",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
e.EXPECT().Execute(ctx, []string{"get", "machinedeployments.cluster.x-k8s.io", "-o", "json", "--kubeconfig", cluster.KubeconfigFile}).Return(*bytes.NewBufferString(fileContent), nil)
gotDeployments, err := k.GetMachineDeployments(ctx, executables.WithCluster(cluster))
if err != nil {
t.Fatalf("Kubectl.GetMachineDeployments() error = %v, want nil", err)
}
gotNames := make([]string, 0, len(gotDeployments))
for _, p := range gotDeployments {
gotNames = append(gotNames, p.Name)
}
if !reflect.DeepEqual(gotNames, tt.wantMachineDeploymentNames) {
t.Fatalf("Kubectl.GetMachineDeployments() deployments = %+v, want %+v", gotNames, tt.wantMachineDeploymentNames)
}
})
}
}
func TestKubectlCountMachineDeploymentReplicasReady(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantError bool
wantTotal int
wantReady int
returnError bool
}{
{
testName: "no machine deployments",
jsonResponseFile: "testdata/kubectl_no_machine_deployments.json",
wantError: false,
wantReady: 0,
wantTotal: 0,
returnError: false,
},
{
testName: "multiple machine deployments",
jsonResponseFile: "testdata/kubectl_machine_deployments.json",
wantError: false,
wantReady: 2,
wantTotal: 2,
returnError: false,
},
{
testName: "multiple machine deployments with unready replicas",
jsonResponseFile: "testdata/kubectl_machine_deployments_unready.json",
wantError: false,
wantReady: 2,
wantTotal: 3,
returnError: false,
},
{
testName: "non-running machine deployments",
jsonResponseFile: "testdata/kubectl_machine_deployments_provisioned.json",
wantError: true,
wantReady: 0,
wantTotal: 0,
returnError: false,
},
{
testName: "unavailable replicas",
jsonResponseFile: "testdata/kubectl_machine_deployments_unavailable.json",
wantError: true,
wantReady: 0,
wantTotal: 0,
},
{
testName: "error response",
jsonResponseFile: "",
wantError: true,
wantReady: 0,
wantTotal: 0,
returnError: true,
},
}
for _, tc := range tests {
t.Run(tc.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
tt := newKubectlTest(t)
if tc.returnError {
e.EXPECT().
Execute(ctx, []string{
"get", "machinedeployments.cluster.x-k8s.io",
"-o", "json",
"--kubeconfig", cluster.KubeconfigFile,
"--namespace", "eksa-system",
"--selector=cluster.x-k8s.io/cluster-name=test-cluster",
}).
Return(*bytes.NewBufferString(""), errors.New(""))
} else {
fileContent := test.ReadFile(t, tc.jsonResponseFile)
e.EXPECT().
Execute(ctx, []string{
"get", "machinedeployments.cluster.x-k8s.io",
"-o", "json",
"--kubeconfig", cluster.KubeconfigFile,
"--namespace", "eksa-system",
"--selector=cluster.x-k8s.io/cluster-name=test-cluster",
}).
Return(*bytes.NewBufferString(fileContent), nil)
}
ready, total, err := k.CountMachineDeploymentReplicasReady(ctx, cluster.Name, cluster.KubeconfigFile)
if tc.wantError {
tt.Expect(err).NotTo(BeNil())
} else {
tt.Expect(err).To(BeNil())
}
tt.Expect(ready).To(Equal(tc.wantReady))
tt.Expect(total).To(Equal(tc.wantTotal))
})
}
}
func TestKubectlValidateWorkerNodes(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantError bool
}{
{
testName: "no machine deployments",
jsonResponseFile: "testdata/kubectl_no_machine_deployments.json",
wantError: false,
},
{
testName: "multiple machine deployments",
jsonResponseFile: "testdata/kubectl_machine_deployments.json",
wantError: false,
},
{
testName: "multiple machine deployments with unready replicas",
jsonResponseFile: "testdata/kubectl_machine_deployments_unready.json",
wantError: true,
},
{
testName: "non-running machine deployments",
jsonResponseFile: "testdata/kubectl_machine_deployments_provisioned.json",
wantError: true,
},
}
for _, tc := range tests {
t.Run(tc.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
tt := newKubectlTest(t)
fileContent := test.ReadFile(t, tc.jsonResponseFile)
e.EXPECT().
Execute(ctx, []string{
"get", "machinedeployments.cluster.x-k8s.io",
"-o", "json",
"--kubeconfig", cluster.KubeconfigFile,
"--namespace", "eksa-system",
"--selector=cluster.x-k8s.io/cluster-name=test-cluster",
}).
Return(*bytes.NewBufferString(fileContent), nil)
err := k.ValidateWorkerNodes(ctx, cluster.Name, cluster.KubeconfigFile)
if tc.wantError {
tt.Expect(err).NotTo(BeNil())
} else {
tt.Expect(err).To(BeNil())
}
})
}
}
func TestKubectlGetKubeAdmControlPlanes(t *testing.T) {
t.Parallel()
tests := []struct {
testName string
jsonResponseFile string
wantCpNames []string
}{
{
testName: "no control planes",
jsonResponseFile: "testdata/kubectl_no_kubeadmcps.json",
wantCpNames: []string{},
},
{
testName: "multiple control planes",
jsonResponseFile: "testdata/kubectl_kubeadmcps.json",
wantCpNames: []string{
"test0-control-plane",
"test1-control-plane",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, tt.jsonResponseFile)
e.EXPECT().Execute(ctx, []string{"get", "kubeadmcontrolplanes.controlplane.cluster.x-k8s.io", "-o", "json", "--kubeconfig", cluster.KubeconfigFile}).Return(*bytes.NewBufferString(fileContent), nil)
gotCps, err := k.GetKubeadmControlPlanes(ctx, executables.WithCluster(cluster))
if err != nil {
t.Fatalf("Kubectl.GetKubeadmControlPlanes() error = %v, want nil", err)
}
gotNames := make([]string, 0, len(gotCps))
for _, p := range gotCps {
gotNames = append(gotNames, p.Name)
}
if !reflect.DeepEqual(gotNames, tt.wantCpNames) {
t.Fatalf("Kubectl.GetKubeadmControlPlanes() controlPlanes = %+v, want %+v", gotNames, tt.wantCpNames)
}
})
}
}
func TestKubectlVersion(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
fileContent := test.ReadFile(t, "testdata/kubectl_version.json")
e.EXPECT().Execute(ctx, []string{"version", "-o", "json", "--kubeconfig", cluster.KubeconfigFile}).Return(*bytes.NewBufferString(fileContent), nil)
gotVersions, err := k.Version(ctx, cluster)
if err != nil {
t.Fatalf("Kubectl.Version() error = %v, want nil", err)
}
wantVersions := &executables.VersionResponse{
ClientVersion: version.Info{
Major: "1",
Minor: "21",
GitVersion: "v1.21.2",
GitCommit: "092fbfbf53427de67cac1e9fa54aaa09a28371d7",
GitTreeState: "clean",
BuildDate: "2021-06-16T12:59:11Z",
GoVersion: "go1.16.5",
Compiler: "gc",
Platform: "darwin/amd64",
},
ServerVersion: version.Info{
Major: "1",
Minor: "18+",
GitVersion: "v1.18.16-eks-1-18-4",
GitCommit: "3cdb4c9ab835e2964c8eaeb3ee77d088c7fa36aa",
GitTreeState: "clean",
BuildDate: "2021-05-05T13:09:23Z",
GoVersion: "go1.13.15",
Compiler: "gc",
Platform: "linux/amd64",
},
}
if !reflect.DeepEqual(gotVersions, wantVersions) {
t.Fatalf("Kubectl.Version() versionResponse = %+v, want %+v", gotVersions, wantVersions)
}
}
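// Error-path sketch for Version: if the underlying kubectl invocation fails,
// Version is assumed to return an error rather than a partial response.
func TestKubectlVersionError(t *testing.T) {
	t.Parallel()
	k, ctx, cluster, e := newKubectl(t)
	e.EXPECT().Execute(ctx, []string{"version", "-o", "json", "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, errors.New("error from execute"))
	if _, err := k.Version(ctx, cluster); err == nil {
		t.Fatal("Kubectl.Version() error = nil, want not nil")
	}
}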
func TestKubectlValidateClustersCRDSuccess(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{"get", "customresourcedefinition", "clusters.cluster.x-k8s.io", "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, nil)
err := k.ValidateClustersCRD(ctx, cluster)
if err != nil {
t.Fatalf("Kubectl.ValidateClustersCRD() error = %v, want nil", err)
}
}
func TestKubectlValidateClustersCRDNotFound(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{"get", "customresourcedefinition", "clusters.cluster.x-k8s.io", "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, errors.New("CRD not found"))
err := k.ValidateClustersCRD(ctx, cluster)
if err == nil {
t.Fatalf("Kubectl.ValidateClustersCRD() error == nil, want CRD not found")
}
}
func TestKubectlValidateEKSAClustersCRDSuccess(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{"get", "customresourcedefinition", "clusters.anywhere.eks.amazonaws.com", "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, nil)
err := k.ValidateEKSAClustersCRD(ctx, cluster)
if err != nil {
t.Fatalf("Kubectl.ValidateEKSAClustersCRD() error = %v, want nil", err)
}
}
func TestKubectlValidateEKSAClustersCRDNotFound(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{"get", "customresourcedefinition", "clusters.anywhere.eks.amazonaws.com", "--kubeconfig", cluster.KubeconfigFile}).Return(bytes.Buffer{}, errors.New("CRD not found"))
err := k.ValidateEKSAClustersCRD(ctx, cluster)
if err == nil {
t.Fatalf("Kubectl.ValidateEKSAClustersCRD() error == nil, want CRD not found")
}
}
func TestKubectlUpdateEnvironmentVariablesInNamespace(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
envMap := map[string]string{
"key": "val",
}
e.EXPECT().Execute(ctx, []string{
"set", "env", "deployment",
"eksa-controller-manager", "key=val",
"--kubeconfig", cluster.KubeconfigFile,
"--namespace", "eksa-system",
})
err := k.UpdateEnvironmentVariablesInNamespace(ctx, "deployment", "eksa-controller-manager", envMap, cluster, "eksa-system")
if err != nil {
t.Fatalf("Kubectl.UpdateEnvironmentVariablesInNamespace() error = %v, want nil", err)
}
}
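// Error-path counterpart to the test above; a minimal sketch assuming the
// executable error is surfaced by UpdateEnvironmentVariablesInNamespace.
func TestKubectlUpdateEnvironmentVariablesInNamespaceError(t *testing.T) {
	t.Parallel()
	k, ctx, cluster, e := newKubectl(t)
	envMap := map[string]string{
		"key": "val",
	}
	e.EXPECT().Execute(ctx, []string{
		"set", "env", "deployment",
		"eksa-controller-manager", "key=val",
		"--kubeconfig", cluster.KubeconfigFile,
		"--namespace", "eksa-system",
	}).Return(bytes.Buffer{}, errors.New("error from execute"))
	err := k.UpdateEnvironmentVariablesInNamespace(ctx, "deployment", "eksa-controller-manager", envMap, cluster, "eksa-system")
	if err == nil {
		t.Fatalf("Kubectl.UpdateEnvironmentVariablesInNamespace() error = nil, want not nil")
	}
}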
func TestKubectlUpdateAnnotation(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{
"annotate", "gitrepositories", "flux-system",
"key1=val1", "--overwrite",
"--kubeconfig", cluster.KubeconfigFile,
})
a := map[string]string{
"key1": "val1",
}
err := k.UpdateAnnotation(ctx, "gitrepositories", "flux-system", a, executables.WithOverwrite(), executables.WithCluster(cluster))
if err != nil {
t.Fatalf("Kubectl.UpdateAnnotation() error = %v, want nil", err)
}
}
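// Error-path counterpart to TestKubectlUpdateAnnotation; assumes the annotate
// error is returned unchanged.
func TestKubectlUpdateAnnotationError(t *testing.T) {
	t.Parallel()
	k, ctx, cluster, e := newKubectl(t)
	e.EXPECT().Execute(ctx, []string{
		"annotate", "gitrepositories", "flux-system",
		"key1=val1", "--overwrite",
		"--kubeconfig", cluster.KubeconfigFile,
	}).Return(bytes.Buffer{}, errors.New("error from execute"))
	a := map[string]string{
		"key1": "val1",
	}
	if err := k.UpdateAnnotation(ctx, "gitrepositories", "flux-system", a, executables.WithOverwrite(), executables.WithCluster(cluster)); err == nil {
		t.Fatalf("Kubectl.UpdateAnnotation() error = nil, want not nil")
	}
}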
func TestKubectlRemoveAnnotation(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{
"annotate", "cluster", "test-cluster", "key1-", "--kubeconfig", cluster.KubeconfigFile,
})
err := k.RemoveAnnotation(ctx, "cluster", "test-cluster", "key1", executables.WithCluster(cluster))
if err != nil {
t.Fatalf("Kubectl.RemoveAnnotation() error = %v, want nil", err)
}
}
func TestKubectlRemoveAnnotationInNamespace(t *testing.T) {
t.Parallel()
k, ctx, cluster, e := newKubectl(t)
e.EXPECT().Execute(ctx, []string{
"annotate", "cluster", "test-cluster", "key1-", "--kubeconfig", cluster.KubeconfigFile, "--namespace", "",
})
err := k.RemoveAnnotationInNamespace(ctx, "cluster", "test-cluster", "key1", cluster, "")
if err != nil {
t.Fatalf("Kubectl.RemoveAnnotationInNamespace() error = %v, want nil", err)
}
}
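
// TestKubectlGetBundles verifies that GetBundles decodes the JSON returned by
// "kubectl get bundles.anywhere.eks.amazonaws.com" into a Bundles object.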
func TestKubectlGetBundles(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
wantBundles := test.Bundles(t)
bundleName := "Bundle-name"
bundlesJson, err := json.Marshal(wantBundles)
if err != nil {
t.Fatalf("Failed marshalling Bundles: %s", err)
}
tt.e.EXPECT().Execute(
tt.ctx,
"get", "bundles.anywhere.eks.amazonaws.com", bundleName, "-o", "json", "--kubeconfig", tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
).Return(*bytes.NewBuffer(bundlesJson), nil)
gotBundles, err := tt.k.GetBundles(tt.ctx, tt.cluster.KubeconfigFile, bundleName, tt.namespace)
tt.Expect(err).To(BeNil())
tt.Expect(gotBundles).To(Equal(wantBundles))
}
func TestKubectlGetClusterResourceSet(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
resourceSetJson := test.ReadFile(t, "testdata/kubectl_clusterresourceset.json")
resourceSetName := "Bundle-name"
wantResourceSet := &addons.ClusterResourceSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "addons.cluster.x-k8s.io/v1beta1",
Kind: "ClusterResourceSet",
},
Spec: addons.ClusterResourceSetSpec{
ClusterSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster.x-k8s.io/cluster-name": "cluster-1",
},
},
Strategy: "ApplyOnce",
Resources: []addons.ResourceRef{
{
Kind: "Secret",
Name: "vsphere-cloud-controller-manager",
},
{
Kind: "ConfigMap",
Name: "vsphere-cloud-controller-manager-role",
},
},
},
}
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.cluster.KubeconfigFile, "clusterresourcesets.addons.cluster.x-k8s.io", "--namespace", tt.namespace, resourceSetName,
).Return(*bytes.NewBufferString(resourceSetJson), nil)
gotResourceSet, err := tt.k.GetClusterResourceSet(tt.ctx, tt.cluster.KubeconfigFile, resourceSetName, tt.namespace)
tt.Expect(err).To(BeNil())
tt.Expect(gotResourceSet).To(Equal(wantResourceSet))
}
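
// The pause/resume tests verify the merge patches applied to the CAPI Cluster
// object: pausing sets spec.paused to true, while resuming sets it to null so
// the field is removed.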
func TestKubectlPauseCAPICluster(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
patch := "{\"spec\":{\"paused\":true}}"
tt.e.EXPECT().Execute(
tt.ctx,
"patch", capiClustersResourceType, "test-cluster", "--type=merge", "-p", patch,
"--kubeconfig", tt.cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.PauseCAPICluster(tt.ctx, "test-cluster", tt.cluster.KubeconfigFile)).To(Succeed())
}
func TestKubectlResumeCAPICluster(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
patch := "{\"spec\":{\"paused\":null}}"
tt.e.EXPECT().Execute(
tt.ctx,
"patch", capiClustersResourceType, "test-cluster", "--type=merge", "-p", patch,
"--kubeconfig", tt.cluster.KubeconfigFile, "--namespace", constants.EksaSystemNamespace,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.ResumeCAPICluster(tt.ctx, "test-cluster", tt.cluster.KubeconfigFile)).To(Succeed())
}
func TestKubectlMergePatchResource(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
patch := "{\"spec\":{\"paused\":false}}"
tt.e.EXPECT().Execute(
tt.ctx,
"patch", capiClustersResourceType, "test-cluster", "--type=merge", "-p", patch,
"--kubeconfig", tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.MergePatchResource(tt.ctx, capiClustersResourceType, "test-cluster", patch,
tt.cluster.KubeconfigFile, tt.namespace)).To(Succeed())
}
func TestKubectlMergePatchResourceError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
patch := "{\"spec\":{\"paused\":false}}"
tt.e.EXPECT().Execute(
tt.ctx,
"patch", capiClustersResourceType, "test-cluster", "--type=merge", "-p", patch,
"--kubeconfig", tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
).Return(bytes.Buffer{}, errors.New("Error with kubectl merge patch"))
err := tt.k.MergePatchResource(tt.ctx, capiClustersResourceType, "test-cluster", patch,
tt.cluster.KubeconfigFile, tt.namespace)
tt.Expect(err).To(HaveOccurred())
}
func TestKubectlGetConfigMap(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
configmapJson := test.ReadFile(t, "testdata/kubectl_configmap.json")
configmapName := "foo"
wantConfigmap := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: configmapName,
Namespace: "eksa-system",
},
Data: map[string]string{
"data": "foo",
},
}
tt.e.EXPECT().Execute(
tt.ctx,
"get", "configmap", configmapName, "-o", "json", "--kubeconfig", tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
).Return(*bytes.NewBufferString(configmapJson), nil)
gotConfigmap, err := tt.k.GetConfigMap(tt.ctx, tt.cluster.KubeconfigFile, configmapName, tt.namespace)
tt.Expect(err).To(BeNil())
tt.Expect(gotConfigmap).To(Equal(wantConfigmap))
}
func TestKubectlSetDaemonSetImage(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
daemonSetName := "ds-1"
container := "cont1"
image := "public.ecr.aws/image2"
tt.e.EXPECT().Execute(
tt.ctx,
"set", "image", "daemonset/ds-1", "cont1=public.ecr.aws/image2", "--namespace", tt.namespace, "--kubeconfig", tt.cluster.KubeconfigFile,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.SetDaemonSetImage(tt.ctx, tt.cluster.KubeconfigFile, daemonSetName, tt.namespace, container, image)).To(Succeed())
}
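
// The CheckProviderExists tests exercise the two-step lookup: the provider
// namespace is queried first, and providers.clusterctl.cluster.x-k8s.io is only
// queried when that namespace exists. The assertions assume CheckProviderExists
// returns (bool, error).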
func TestKubectlCheckCAPIProviderExistsNotInstalled(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
providerName := "providerName"
providerNs := "providerNs"
tt.e.EXPECT().Execute(tt.ctx,
[]string{"get", "namespace", fmt.Sprintf("--field-selector=metadata.name=%s", providerNs), "--kubeconfig", tt.cluster.KubeconfigFile}).Return(bytes.Buffer{}, nil)
// Assumes CheckProviderExists returns (bool, error).
exists, err := tt.k.CheckProviderExists(tt.ctx, tt.cluster.KubeconfigFile, providerName, providerNs)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(exists).To(BeFalse())
}
func TestKubectlCheckCAPIProviderExistsInstalled(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
providerName := "providerName"
providerNs := "providerNs"
tt.e.EXPECT().Execute(tt.ctx,
[]string{"get", "namespace", fmt.Sprintf("--field-selector=metadata.name=%s", providerNs), "--kubeconfig", tt.cluster.KubeconfigFile}).Return(*bytes.NewBufferString("namespace"), nil)
tt.e.EXPECT().Execute(tt.ctx,
[]string{"get", "providers.clusterctl.cluster.x-k8s.io", "--namespace", providerNs, fmt.Sprintf("--field-selector=metadata.name=%s", providerName), "--kubeconfig", tt.cluster.KubeconfigFile})
// Assumes CheckProviderExists returns (bool, error).
_, err := tt.k.CheckProviderExists(tt.ctx, tt.cluster.KubeconfigFile, providerName, providerNs)
tt.Expect(err).NotTo(HaveOccurred())
}
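
// The getter tests below use the fluent kubectlGetterTest builder: they wire up
// a resource type, a getter func, and canned JSON from testdata, then assert
// either the decoded object (testSuccess) or the error path (testError).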
func TestKubectlGetDeploymentSuccess(t *testing.T) {
t.Parallel()
var replicas int32 = 2
newKubectlGetterTest(t).withResourceType(
"deployment",
).withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
return tt.k.GetDeployment(tt.ctx, tt.name, tt.namespace, tt.kubeconfig)
}).withJsonFromFile(
"testdata/kubectl_deployment.json",
).andWant(
&appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "coredns",
Namespace: "kube-system",
},
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "registry.k8s/coredns:1.7.0",
Name: "coredns",
},
},
},
},
},
},
).testSuccess()
}
func TestKubectlGetDeploymentError(t *testing.T) {
t.Parallel()
newKubectlGetterTest(t).withResourceType(
"deployment",
).withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
return tt.k.GetDeployment(tt.ctx, tt.name, tt.namespace, tt.kubeconfig)
}).testError()
}
func TestKubectlGetDaemonSetSuccess(t *testing.T) {
t.Parallel()
newKubectlGetterTest(t).withResourceType(
"daemonset",
).withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
return tt.k.GetDaemonSet(tt.ctx, tt.name, tt.namespace, tt.kubeconfig)
}).withJsonFromFile(
"testdata/kubectl_daemonset.json",
).andWant(
&appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "cilium",
Namespace: "kube-system",
},
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Command: []string{"cilium-agent"},
Image: "public.ecr.aws/isovalent/cilium:v1.9.11-eksa.1",
Name: "cilium-agent",
},
},
},
},
},
},
).testSuccess()
}
func TestKubectlGetDaemonSetError(t *testing.T) {
t.Parallel()
newKubectlGetterTest(t).withResourceType(
"daemonset",
).withGetter(func(tt *kubectlGetterTest) (client.Object, error) {
return tt.k.GetDaemonSet(tt.ctx, tt.name, tt.namespace, tt.kubeconfig)
}).testError()
}
func TestApplyTolerationsFromTaints(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
params := []string{
"get", "ds", "test",
"-o", "jsonpath={range .spec.template.spec}{.tolerations} {end}",
"-n", "testNs", "--kubeconfig", tt.cluster.KubeconfigFile,
}
tt.e.EXPECT().Execute(
tt.ctx, gomock.Eq(params)).Return(bytes.Buffer{}, nil)
var taints []corev1.Taint
tt.Expect(tt.k.ApplyTolerationsFromTaints(tt.ctx, taints, taints, "ds", "test", tt.cluster.KubeconfigFile, "testNs", "/test")).To(Succeed())
}
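
// TestKubectlGetObjectNotFound verifies that an empty response from
// "kubectl get --ignore-not-found" is surfaced as a NotFound apierror,
// regardless of how the resource type is qualified.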
func TestKubectlGetObjectNotFound(t *testing.T) {
t.Parallel()
tests := []struct {
name string
resourceType string
}{
{
name: "simple resource type",
resourceType: "cluster",
},
{
name: "resource type with resource and group",
resourceType: "cluster.x-k8s.io",
},
{
name: "resource type with resource, version and group",
resourceType: "cluster.v1beta1.x-k8s.io",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tt := newKubectlTest(t)
name := "my-cluster"
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, tc.resourceType, "--namespace", tt.namespace, name,
).Return(bytes.Buffer{}, nil)
err := tt.k.GetObject(tt.ctx, tc.resourceType, name, tt.namespace, tt.kubeconfig, &clusterv1.Cluster{})
tt.Expect(err).To(HaveOccurred())
tt.Expect(apierrors.IsNotFound(err)).To(BeTrue())
})
}
}
func TestKubectlGetObjectWithEmptyNamespace(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
name := "my-cluster"
emptyNamespace := ""
tt.e.EXPECT().Execute(
tt.ctx,
// Here we expect the command to have the default namespace set explicitly
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "cluster", "--namespace", "default", name,
).Return(bytes.Buffer{}, nil)
err := tt.k.GetObject(tt.ctx, "cluster", name, emptyNamespace, tt.kubeconfig, &clusterv1.Cluster{})
tt.Expect(err).To(HaveOccurred())
tt.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
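
// The Get tests below cover KubectlGetOptions semantics: no options lists
// across all namespaces, a Namespace scopes the list, and Name plus Namespace
// fetches a single object; Name without Namespace is rejected.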
func TestKubectlGetAllObjects(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
list := &v1alpha1.ClusterList{}
b, err := json.Marshal(list)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--all-namespaces",
).Return(*bytes.NewBuffer(b), nil)
got := &v1alpha1.ClusterList{}
tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, got)).To(Succeed())
tt.Expect(got).To(BeComparableTo(list))
}
func TestKubectlGetAllObjectsInNamespace(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
list := &v1alpha1.ClusterList{}
b, err := json.Marshal(list)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace,
).Return(*bytes.NewBuffer(b), nil)
opts := &kubernetes.KubectlGetOptions{
Namespace: tt.namespace,
}
got := &v1alpha1.ClusterList{}
tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, got, opts)).To(Succeed())
tt.Expect(got).To(BeComparableTo(list))
}
func TestKubectlGetSingleObject(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
clusterName := "my-cluster"
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
}
b, err := json.Marshal(cluster)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace, clusterName,
).Return(*bytes.NewBuffer(b), nil)
opts := &kubernetes.KubectlGetOptions{
Namespace: tt.namespace,
Name: clusterName,
}
got := &v1alpha1.Cluster{}
tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, got, opts)).To(Succeed())
tt.Expect(got).To(BeComparableTo(cluster))
}
func TestKubectlGetWithNameAndWithoutNamespace(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
clusterName := "my-cluster"
opts := &kubernetes.KubectlGetOptions{
Name: clusterName,
}
tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, &v1alpha1.Cluster{}, opts)).To(
MatchError(ContainSubstring("if Name is specified, Namespace is required")),
)
}
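
// TestKubectlCreateSuccess verifies that Create marshals the object to YAML and
// pipes it to "kubectl create -f -" over stdin; the AlreadyExists variant below
// checks that the server error message is mapped to an AlreadyExists apierror.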
func TestKubectlCreateSuccess(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
secret := &corev1.Secret{}
b, err := yaml.Marshal(secret)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().ExecuteWithStdin(
tt.ctx,
b,
"create", "-f", "-", "--kubeconfig", tt.kubeconfig,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.Create(tt.ctx, tt.kubeconfig, secret)).To(Succeed())
}
func TestKubectlCreateAlreadyExistsError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
secret := &corev1.Secret{}
b, err := yaml.Marshal(secret)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().ExecuteWithStdin(
tt.ctx,
b,
"create", "-f", "-", "--kubeconfig", tt.kubeconfig,
).Return(
bytes.Buffer{},
errors.New("Error from server (AlreadyExists): error when creating \"STDIN\": secret \"my-secret\" already exists\n"), //nolint:revive // The format of the message it's important here since the code checks for its content
)
err = tt.k.Create(tt.ctx, tt.kubeconfig, secret)
tt.Expect(err).To(HaveOccurred())
tt.Expect(apierrors.IsAlreadyExists(err)).To(BeTrue(), "error should be an AlreadyExists apierror")
}
func TestKubectlReplaceSuccess(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
secret := &corev1.Secret{}
b, err := yaml.Marshal(secret)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().ExecuteWithStdin(
tt.ctx,
b,
"replace", "-f", "-", "--kubeconfig", tt.kubeconfig,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.Replace(tt.ctx, tt.kubeconfig, secret)).To(Succeed())
}
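
// TestKubectlReplaceSuccessWithLastAppliedAnnotation verifies that Replace
// strips the kubectl.kubernetes.io/last-applied-configuration annotation while
// leaving other annotations intact before sending the object to kubectl.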
func TestKubectlReplaceSuccessWithLastAppliedAnnotation(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"kubectl.kubernetes.io/last-applied-configuration": "fake value",
"my-other-annotation": "true",
},
Name: "my-secret",
Namespace: "my-ns",
},
}
cleanedUpSecret := secret.DeepCopy()
cleanedUpSecret.Annotations = map[string]string{
"my-other-annotation": "true",
}
b, err := yaml.Marshal(cleanedUpSecret)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().ExecuteWithStdin(
tt.ctx,
b,
"replace", "-f", "-", "--kubeconfig", tt.kubeconfig,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.Replace(tt.ctx, tt.kubeconfig, secret)).To(Succeed())
}
func TestKubectlGetClusterObjectNotFound(t *testing.T) {
t.Parallel()
test := newKubectlTest(t)
name := "my-cluster"
test.e.EXPECT().Execute(
test.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", test.kubeconfig, "testresource", name,
).Return(bytes.Buffer{}, nil)
err := test.k.GetClusterObject(test.ctx, "testresource", name, test.kubeconfig, &clusterv1.Cluster{})
test.Expect(err).To(HaveOccurred())
test.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
func TestKubectlGetEksaFluxConfig(t *testing.T) {
t.Parallel()
kubeconfig := "/my/kubeconfig"
namespace := "eksa-system"
eksaFluxConfigResourceType := fmt.Sprintf("fluxconfigs.%s", v1alpha1.GroupVersion.Group)
returnConfig := &v1alpha1.FluxConfig{}
returnConfigBytes, err := json.Marshal(returnConfig)
if err != nil {
t.Fatalf("failed to marshal test FluxConfig: %v", err)
}
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"get", eksaFluxConfigResourceType, "testFluxConfig", "-o", "json", "--kubeconfig", kubeconfig, "--namespace", namespace}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(*bytes.NewBuffer(returnConfigBytes), nil)
_, err = k.GetEksaFluxConfig(ctx, "testFluxConfig", kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.GetEksaFluxConfig() error = %v, want error = nil", err)
}
}
func TestKubectlDeleteFluxConfig(t *testing.T) {
t.Parallel()
namespace := "eksa-system"
kubeconfig := "/my/kubeconfig"
eksaFluxConfigResourceType := fmt.Sprintf("fluxconfigs.%s", v1alpha1.GroupVersion.Group)
returnConfig := &v1alpha1.FluxConfig{}
returnConfigBytes, err := json.Marshal(returnConfig)
if err != nil {
t.Fatalf("failed to marshal test FluxConfig: %v", err)
}
mgmtCluster := &types.Cluster{KubeconfigFile: kubeconfig}
k, ctx, _, e := newKubectl(t)
expectedParam := []string{"delete", eksaFluxConfigResourceType, "testFluxConfig", "--kubeconfig", mgmtCluster.KubeconfigFile, "--namespace", namespace, "--ignore-not-found=true"}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(*bytes.NewBuffer(returnConfigBytes), nil)
err = k.DeleteFluxConfig(ctx, mgmtCluster, "testFluxConfig", namespace)
if err != nil {
t.Errorf("Kubectl.DeleteFluxConfig() error = %v, want error = nil", err)
}
}
func TestGetTinkerbellDatacenterConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
datacenterJson := test.ReadFile(t, "testdata/kubectl_tinkerbelldatacenter.json")
wantDatacenter := &v1alpha1.TinkerbellDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "mycluster",
},
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellDatacenterConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
Spec: v1alpha1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "1.2.3.4",
},
}
params := []string{
"get", "tinkerbelldatacenterconfigs.anywhere.eks.amazonaws.com", "mycluster", "-o", "json", "--kubeconfig",
tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(datacenterJson), nil)
got, err := tt.k.GetEksaTinkerbellDatacenterConfig(tt.ctx, "mycluster", tt.cluster.KubeconfigFile, tt.namespace)
tt.Expect(err).To(BeNil())
tt.Expect(got).To(Equal(wantDatacenter))
}
func TestGetTinkerbellMachineConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
machineconfigJson := test.ReadFile(t, "testdata/kubectl_tinkerbellmachineconfig.json")
wantMachineConfig := &v1alpha1.TinkerbellMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "mycluster",
},
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellMachineConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
OSFamily: "ubuntu",
TemplateRef: v1alpha1.Ref{
Name: "mycluster",
Kind: "TinkerbellTemplateConfig",
},
},
}
params := []string{
"get", "tinkerbellmachineconfigs.anywhere.eks.amazonaws.com", "mycluster", "-o", "json", "--kubeconfig",
tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(machineconfigJson), nil)
got, err := tt.k.GetEksaTinkerbellMachineConfig(tt.ctx, "mycluster", tt.cluster.KubeconfigFile, tt.namespace)
tt.Expect(err).To(BeNil())
tt.Expect(got).To(Equal(wantMachineConfig))
}
func TestGetTinkerbellMachineConfigInvalid(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
machineconfigJson := test.ReadFile(t, "testdata/kubectl_tinkerbellmachineconfig_invalid.json")
params := []string{
"get", "tinkerbellmachineconfigs.anywhere.eks.amazonaws.com", "mycluster", "-o", "json", "--kubeconfig",
tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(machineconfigJson), nil)
_, err := tt.k.GetEksaTinkerbellMachineConfig(tt.ctx, "mycluster", tt.cluster.KubeconfigFile, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestGetTinkerbellDatacenterConfigInvalid(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
datacenterconfigJson := test.ReadFile(t, "testdata/kubectl_tinkerbelldatacenter_invalid.json")
params := []string{
"get", "tinkerbelldatacenterconfigs.anywhere.eks.amazonaws.com", "mycluster", "-o", "json", "--kubeconfig",
tt.cluster.KubeconfigFile, "--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(datacenterconfigJson), nil)
_, err := tt.k.GetEksaTinkerbellDatacenterConfig(tt.ctx, "mycluster", tt.cluster.KubeconfigFile, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestGetTinkerbellMachineConfigNotFound(t *testing.T) {
t.Parallel()
var kubeconfigfile string
tt := newKubectlTest(t)
params := []string{
"get", "tinkerbellmachineconfigs.anywhere.eks.amazonaws.com", "test", "-o", "json", "--kubeconfig",
kubeconfigfile, "--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(""), errors.New("machineconfig not found"))
_, err := tt.k.GetEksaTinkerbellMachineConfig(tt.ctx, "test", kubeconfigfile, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestGetTinkerbellDatacenterConfigNotFound(t *testing.T) {
t.Parallel()
var kubeconfigfile string
tt := newKubectlTest(t)
params := []string{
"get", "tinkerbelldatacenterconfigs.anywhere.eks.amazonaws.com", "test", "-o", "json", "--kubeconfig",
kubeconfigfile, "--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(""), errors.New("datacenterconfig not found"))
_, err := tt.k.GetEksaTinkerbellDatacenterConfig(tt.ctx, "test", kubeconfigfile, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestGetUnprovisionedTinkerbellHardware(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
hardwareJSON := test.ReadFile(t, "testdata/kubectl_tinkerbellhardware.json")
kubeconfig := "foo/bar"
var expect []tinkv1alpha1.Hardware
for _, h := range []string{"hw1", "hw2"} {
expect = append(expect, tinkv1alpha1.Hardware{
TypeMeta: metav1.TypeMeta{
Kind: "Hardware",
APIVersion: "tinkerbell.org/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: h,
},
})
}
params := []string{
"get", executables.TinkerbellHardwareResourceType,
"-l", "!v1alpha1.tinkerbell.org/ownerName",
"--kubeconfig", kubeconfig,
"-o", "json",
"--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(hardwareJSON), nil)
hardware, err := tt.k.GetUnprovisionedTinkerbellHardware(tt.ctx, kubeconfig, tt.namespace)
tt.Expect(err).To(Succeed())
tt.Expect(hardware).To(Equal(expect))
}
func TestGetUnprovisionedTinkerbellHardware_MarshallingError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
kubeconfig := "foo/bar"
var buf bytes.Buffer
params := []string{
"get", executables.TinkerbellHardwareResourceType,
"-l", "!v1alpha1.tinkerbell.org/ownerName",
"--kubeconfig", kubeconfig,
"-o", "json",
"--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(buf, nil)
_, err := tt.k.GetUnprovisionedTinkerbellHardware(tt.ctx, kubeconfig, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestGetUnprovisionedTinkerbellHardware_ExecutableErrors(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
kubeconfig := "foo/bar"
var buf bytes.Buffer
expect := errors.New("foo bar")
params := []string{
"get", executables.TinkerbellHardwareResourceType,
"-l", "!v1alpha1.tinkerbell.org/ownerName",
"--kubeconfig", kubeconfig,
"-o", "json",
"--namespace", tt.namespace,
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(buf, expect)
_, err := tt.k.GetUnprovisionedTinkerbellHardware(tt.ctx, kubeconfig, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
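
// The Delete tests below cover KubectlDeleteOptions semantics: Name plus
// Namespace deletes a single object, Namespace alone deletes everything in it
// (optionally filtered by a label selector), and no options deletes across all
// namespaces; Name requires Namespace and is mutually exclusive with HasLabels.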
func TestKubectlDeleteSingleObject(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
name := "my-cluster"
resourceType := "cluster.x-k8s.io"
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "--kubeconfig", tt.kubeconfig, resourceType, name, "--namespace", tt.namespace,
).Return(bytes.Buffer{}, nil)
opts := &kubernetes.KubectlDeleteOptions{
Name: name,
Namespace: tt.namespace,
}
tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(Succeed())
}
func TestKubectlDeleteAllObjectsInNamespace(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
resourceType := "cluster.x-k8s.io"
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "--kubeconfig", tt.kubeconfig, resourceType, "--all", "--namespace", tt.namespace,
).Return(bytes.Buffer{}, nil)
opts := &kubernetes.KubectlDeleteOptions{
Namespace: tt.namespace,
}
tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(Succeed())
}
func TestKubectlDeleteAllObjectsInNamespaceWithLabels(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
resourceType := "cluster.x-k8s.io"
tt.e.EXPECT().Execute(
tt.ctx,
"delete",
"--kubeconfig", tt.kubeconfig,
resourceType,
"--namespace", tt.namespace,
"--selector", "label1=val1,label2=val2",
).Return(bytes.Buffer{}, nil)
opts := &kubernetes.KubectlDeleteOptions{
Namespace: tt.namespace,
HasLabels: map[string]string{
"label2": "val2",
"label1": "val1",
},
}
tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(Succeed())
}
func TestKubectlDeleteAllObjectsInAllNamespaces(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
resourceType := "cluster.x-k8s.io"
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "--kubeconfig", tt.kubeconfig, resourceType, "--all", "--all-namespaces",
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig)).To(Succeed())
}
func TestKubectlDeleteObjectWithNoNamespace(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
resourceType := "cluster.x-k8s.io"
clusterName := "my-cluster"
opts := &kubernetes.KubectlDeleteOptions{
Name: clusterName,
}
tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(
MatchError(ContainSubstring("if Name is specified, Namespace is required")),
)
}
func TestKubectlDeleteObjectWithHasLabels(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
resourceType := "cluster.x-k8s.io"
clusterName := "my-cluster"
opts := &kubernetes.KubectlDeleteOptions{
Name: clusterName,
Namespace: tt.namespace,
HasLabels: map[string]string{
"mylabel": "value",
},
}
tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(
MatchError(ContainSubstring("options for HasLabels and Name are mutually exclusive")),
)
}
func TestKubectlDeleteObjectNotFoundError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
name := "my-cluster"
resourceType := "cluster.x-k8s.io"
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "--kubeconfig", tt.kubeconfig, resourceType, name, "--namespace", tt.namespace,
).Return(
bytes.Buffer{},
errors.New("Error from server (NotFound): cluster \"my-cluster\" not found\n"), //nolint:revive // The format of the message it's important here since the code checks for its content
)
opts := &kubernetes.KubectlDeleteOptions{
Name: name,
Namespace: tt.namespace,
}
err := tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)
tt.Expect(err).To(HaveOccurred())
tt.Expect(apierrors.IsNotFound(err)).To(BeTrue(), "error should be a NotFound apierror")
}
func TestKubectlDeleteClusterObject(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
name := "my-storageclass"
resourceType := "storageclass"
tt.e.EXPECT().Execute(
tt.ctx,
"delete", resourceType, name, "--kubeconfig", tt.kubeconfig,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.DeleteClusterObject(tt.ctx, resourceType, name, tt.kubeconfig)).To(Succeed())
}
func TestKubectlDeleteClusterObjectError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
name := "my-storageclass"
resourceType := "storageclass"
tt.e.EXPECT().Execute(
tt.ctx,
"delete", resourceType, name, "--kubeconfig", tt.kubeconfig,
).Return(bytes.Buffer{}, errors.New("test error"))
tt.Expect(tt.k.DeleteClusterObject(tt.ctx, resourceType, name, tt.kubeconfig)).NotTo(Succeed())
}
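
// The wait tests below verify that human-readable timeouts such as "5m" are
// converted to the "300.00s" form passed to "kubectl wait --timeout".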
func TestKubectlWaitForManagedExternalEtcdNotReady(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "5m"
expectedTimeout := "300.00s"
tt.e.EXPECT().Execute(
tt.ctx,
"wait", "--timeout", expectedTimeout, "--for=condition=ManagedEtcdReady=false", "clusters.cluster.x-k8s.io/test", "--kubeconfig", tt.cluster.KubeconfigFile, "-n", "eksa-system",
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.WaitForManagedExternalEtcdNotReady(tt.ctx, tt.cluster, timeout, "test")).To(Succeed())
}
func TestKubectlWaitForMachineDeploymentReady(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "5m"
expectedTimeout := "300.00s"
tt.e.EXPECT().Execute(
tt.ctx,
"wait", "--timeout", expectedTimeout, "--for=condition=Ready=true", "machinedeployments.cluster.x-k8s.io/test", "--kubeconfig", tt.cluster.KubeconfigFile, "-n", "eksa-system",
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.WaitForMachineDeploymentReady(tt.ctx, tt.cluster, timeout, "test")).To(Succeed())
}
func TestKubectlWaitForClusterReady(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "5m"
expectedTimeout := "300.00s"
tt.e.EXPECT().Execute(
tt.ctx,
"wait", "--timeout", expectedTimeout, "--for=condition=Ready", "clusters.cluster.x-k8s.io/test", "--kubeconfig", tt.cluster.KubeconfigFile, "-n", "eksa-system",
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.WaitForClusterReady(tt.ctx, tt.cluster, timeout, "test")).To(Succeed())
}
func TestWaitForRufioMachines(t *testing.T) {
t.Parallel()
kt := newKubectlTest(t)
timeout := "5m"
expectedTimeout := "300.00s"
kt.e.EXPECT().Execute(
kt.ctx,
"wait", "--timeout", expectedTimeout, "--for=condition=Contactable", "machines.bmc.tinkerbell.org", "--kubeconfig", kt.cluster.KubeconfigFile, "-n", "eksa-system", "--all",
).Return(bytes.Buffer{}, nil)
kt.Expect(kt.k.WaitForRufioMachines(kt.ctx, kt.cluster, timeout, "Contactable", "eksa-system")).To(Succeed())
}
func TestKubectlApply(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
secret := &corev1.Secret{}
b, err := yaml.Marshal(secret)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().ExecuteWithStdin(
tt.ctx,
b,
"apply", "-f", "-", "--kubeconfig", tt.kubeconfig,
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.Apply(tt.ctx, tt.kubeconfig, secret)).To(Succeed())
}
func TestKubectlListObjects(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
list := &v1alpha1.ClusterList{}
b, err := json.Marshal(list)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace,
).Return(*bytes.NewBuffer(b), nil)
tt.Expect(tt.k.ListObjects(tt.ctx, "clusters", tt.namespace, tt.kubeconfig, &v1alpha1.ClusterList{})).To(Succeed())
}
func TestKubectlListObjectsExecError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace,
).Return(bytes.Buffer{}, errors.New("error"))
tt.Expect(tt.k.ListObjects(tt.ctx, "clusters", tt.namespace, tt.kubeconfig, &v1alpha1.ClusterList{})).To(MatchError(ContainSubstring("getting clusters with kubectl: error")))
}
func TestKubectlListObjectsMarshalError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace,
).Return(*bytes.NewBufferString("//"), nil)
tt.Expect(tt.k.ListObjects(tt.ctx, "clusters", tt.namespace, tt.kubeconfig, &v1alpha1.ClusterList{})).To(MatchError(ContainSubstring("parsing get clusters response")))
}
func TestKubectlHasResource(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
pbc := &packagesv1.PackageBundleController{
TypeMeta: metav1.TypeMeta{Kind: "packageBundleController"},
Spec: packagesv1.PackageBundleControllerSpec{
ActiveBundle: "some bundle",
},
}
b, err := json.Marshal(pbc)
tt.Expect(err).To(Succeed())
tt.e.EXPECT().Execute(tt.ctx,
"get", "--ignore-not-found", "-o", "json",
"--kubeconfig", tt.kubeconfig, "packageBundleController", "--namespace", tt.namespace, "testResourceName",
).Return(*bytes.NewBuffer(b), nil)
has, err := tt.k.HasResource(tt.ctx, "packageBundleController", "testResourceName", tt.kubeconfig, tt.namespace)
tt.Expect(err).To(Succeed())
tt.Expect(has).To(BeTrue())
}
func TestKubectlHasResourceWithGetError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(tt.ctx,
"get", "--ignore-not-found", "-o", "json",
"--kubeconfig", tt.kubeconfig, "packageBundleController", "--namespace", tt.namespace, "testResourceName",
).Return(bytes.Buffer{}, fmt.Errorf("test error"))
has, err := tt.k.HasResource(tt.ctx, "packageBundleController", "testResourceName", tt.kubeconfig, tt.namespace)
tt.Expect(err).To(MatchError(ContainSubstring("test error")))
tt.Expect(has).To(BeFalse())
}
func TestKubectlDeletePackageResources(t *testing.T) {
t.Parallel()
t.Run("golden path", func(t *testing.T) {
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "packagebundlecontroller.packages.eks.amazonaws.com", "clusterName", "--kubeconfig", tt.kubeconfig, "--namespace", "eksa-packages", "--ignore-not-found=true",
).Return(*bytes.NewBufferString("//"), nil)
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "namespace", "eksa-packages-clusterName", "--kubeconfig", tt.kubeconfig, "--ignore-not-found=true",
).Return(*bytes.NewBufferString("//"), nil)
tt.Expect(tt.k.DeletePackageResources(tt.ctx, tt.cluster, "clusterName")).To(Succeed())
})
t.Run("pbc failure", func(t *testing.T) {
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "packagebundlecontroller.packages.eks.amazonaws.com", "clusterName", "--kubeconfig", tt.kubeconfig, "--namespace", "eksa-packages", "--ignore-not-found=true",
).Return(*bytes.NewBufferString("//"), fmt.Errorf("bam"))
tt.Expect(tt.k.DeletePackageResources(tt.ctx, tt.cluster, "clusterName")).To(MatchError(ContainSubstring("bam")))
})
t.Run("namespace failure", func(t *testing.T) {
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "packagebundlecontroller.packages.eks.amazonaws.com", "clusterName", "--kubeconfig", tt.kubeconfig, "--namespace", "eksa-packages", "--ignore-not-found=true",
).Return(*bytes.NewBufferString("//"), nil)
tt.e.EXPECT().Execute(
tt.ctx,
"delete", "namespace", "eksa-packages-clusterName", "--kubeconfig", tt.kubeconfig, "--ignore-not-found=true",
).Return(*bytes.NewBufferString("//"), fmt.Errorf("boom"))
tt.Expect(tt.k.DeletePackageResources(tt.ctx, tt.cluster, "clusterName")).To(MatchError(ContainSubstring("boom")))
})
}
func TestKubectlExecuteFromYaml(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().ExecuteWithStdin(
tt.ctx, []byte(nutanixMachineConfigSpec), "apply", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace,
).Return(bytes.Buffer{}, nil)
_, err := tt.k.ExecuteFromYaml(tt.ctx, []byte(nutanixMachineConfigSpec), "apply", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace)
tt.Expect(err).ToNot(HaveOccurred())
}
func TestKubectlSearchNutanixMachineConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"get", "nutanixmachineconfigs.anywhere.eks.amazonaws.com", "-o", "json", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--field-selector=metadata.name=eksa-unit-test",
).Return(*bytes.NewBufferString(nutanixMachineConfigsJSON), nil)
items, err := tt.k.SearchNutanixMachineConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).ToNot(HaveOccurred())
tt.Expect(items).To(HaveLen(1))
}
func TestKubectlSearchNutanixMachineConfigError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"get", "nutanixmachineconfigs.anywhere.eks.amazonaws.com", "-o", "json", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--field-selector=metadata.name=eksa-unit-test",
).Return(bytes.Buffer{}, fmt.Errorf("error"))
items, err := tt.k.SearchNutanixMachineConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).To(HaveOccurred())
tt.Expect(items).To(BeNil())
}
func TestKubectlSearchNutanixDatacenterConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"get", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "-o", "json", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--field-selector=metadata.name=eksa-unit-test",
).Return(*bytes.NewBufferString(nutanixDatacenterConfigsJSON), nil)
items, err := tt.k.SearchNutanixDatacenterConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).ToNot(HaveOccurred())
tt.Expect(items).To(HaveLen(1))
}
func TestKubectlSearchNutanixDatacenterConfigError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(
tt.ctx,
"get", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "-o", "json", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--field-selector=metadata.name=eksa-unit-test",
).Return(bytes.Buffer{}, fmt.Errorf("error"))
items, err := tt.k.SearchNutanixDatacenterConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).To(HaveOccurred())
tt.Expect(items).To(BeNil())
}
func TestKubectlGetEksaNutanixMachineConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "get",
[]string{"--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "nutanixmachineconfigs.anywhere.eks.amazonaws.com", "--namespace", tt.namespace, "eksa-unit-test"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(*bytes.NewBufferString(nutanixMachineConfigSpecJSON), nil)
item, err := tt.k.GetEksaNutanixMachineConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).ToNot(HaveOccurred())
tt.Expect(item).ToNot(BeNil())
}
func TestKubectlGetEksaNutanixMachineConfigError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "get",
[]string{"--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "nutanixmachineconfigs.anywhere.eks.amazonaws.com", "--namespace", tt.namespace, "eksa-unit-test"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, fmt.Errorf("error"))
item, err := tt.k.GetEksaNutanixMachineConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).To(HaveOccurred())
tt.Expect(item).To(BeNil())
}
func TestKubectlGetEksaNutanixDatacenterConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "get",
[]string{"--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", tt.namespace, "eksa-unit-test"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(*bytes.NewBufferString(nutanixDatacenterConfigSpecJSON), nil)
item, err := tt.k.GetEksaNutanixDatacenterConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).ToNot(HaveOccurred())
tt.Expect(item).ToNot(BeNil())
}
func TestKubectlGetEksaNutanixDatacenterConfigError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "get",
[]string{"--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", tt.namespace, "eksa-unit-test"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, fmt.Errorf("error"))
item, err := tt.k.GetEksaNutanixDatacenterConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)
tt.Expect(err).To(HaveOccurred())
tt.Expect(item).To(BeNil())
}
func TestKubectlDeleteEksaNutanixDatacenterConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "delete",
[]string{"nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "eksa-unit-test", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--ignore-not-found=true"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.DeleteEksaNutanixDatacenterConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)).To(Succeed())
}
func TestKubectlDeleteEksaNutanixDatacenterConfigError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "delete",
[]string{"nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "eksa-unit-test", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--ignore-not-found=true"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, fmt.Errorf("error"))
tt.Expect(tt.k.DeleteEksaNutanixDatacenterConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)).NotTo(Succeed())
}
func TestKubectlDeleteEksaNutanixMachineConfig(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "delete",
[]string{"nutanixmachineconfigs.anywhere.eks.amazonaws.com", "eksa-unit-test", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--ignore-not-found=true"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.DeleteEksaNutanixMachineConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)).To(Succeed())
}
func TestKubectlDeleteEksaNutanixMachineConfigError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
tt.e.EXPECT().Execute(gomock.Any(), "delete",
[]string{"nutanixmachineconfigs.anywhere.eks.amazonaws.com", "eksa-unit-test", "--kubeconfig", tt.kubeconfig, "--namespace", tt.namespace, "--ignore-not-found=true"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, fmt.Errorf("error"))
tt.Expect(tt.k.DeleteEksaNutanixMachineConfig(tt.ctx, "eksa-unit-test", tt.kubeconfig, tt.namespace)).NotTo(Succeed())
}
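
// TestWaitForResourceRolledout verifies that "kubectl rollout status" is issued
// against the target resource with the supplied timeout and namespace.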
func TestWaitForResourceRolledout(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "2m"
target := "testdaemonset"
var b bytes.Buffer
expectedParam := []string{"rollout", "status", "daemonset", target, "--kubeconfig", tt.kubeconfig, "--namespace", "eksa-system", "--timeout", timeout}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitForResourceRolledout(tt.ctx, tt.cluster, timeout, target, "eksa-system", "daemonset")).To(Succeed())
}
func TestWaitForDaemonsetRolledoutError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "2m"
target := "testdaemonset"
var b bytes.Buffer
expectedParam := []string{"rollout", "status", "daemonset", target, "--kubeconfig", tt.kubeconfig, "--namespace", "eksa-system", "--timeout", timeout}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, fmt.Errorf("error")).AnyTimes()
tt.Expect(tt.k.WaitForResourceRolledout(tt.ctx, tt.cluster, timeout, target, "eksa-system", "daemonset")).NotTo(Succeed())
}
func TestWaitForPod(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "2m"
expectedTimeout := "120.00s"
condition := "status.containerStatuses[0].state.terminated.reason"
target := "testpod"
tt.e.EXPECT().Execute(gomock.Any(), "wait", "--timeout",
[]string{expectedTimeout, fmt.Sprintf("%s=%s", "--for=condition", condition), fmt.Sprintf("%s/%s", "pod", target), "--kubeconfig", tt.kubeconfig, "-n", "eksa-system"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.WaitForPod(tt.ctx, tt.cluster, timeout, condition, target, "eksa-system")).To(Succeed())
}
func TestWaitForJob(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
timeout := "2m"
expectedTimeout := "120.00s"
condition := "status.containerStatuses[0].state.terminated.reason"
target := "testpod"
tt.e.EXPECT().Execute(gomock.Any(), "wait", "--timeout",
[]string{expectedTimeout, fmt.Sprintf("%s=%s", "--for=condition", condition), fmt.Sprintf("%s/%s", "job", target), "--kubeconfig", tt.kubeconfig, "-n", "eksa-system"},
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(bytes.Buffer{}, nil)
tt.Expect(tt.k.WaitForJobCompleted(tt.ctx, tt.cluster.KubeconfigFile, timeout, condition, target, "eksa-system")).To(Succeed())
}
func TestWaitForPodCompleted(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'Completed'")
tt := newKubectlTest(t)
expectedParam := []string{"get", "pod/testpod", "-o", fmt.Sprintf("%s=%s", "jsonpath", "'{.status.containerStatuses[0].state.terminated.reason}'"), "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitForPodCompleted(tt.ctx, tt.cluster, "testpod", "2m", "eksa-system")).To(Succeed())
}
func TestWaitForPackagesInstalled(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"get", "packages.packages.eks.amazonaws.com/testpackage", "-o", fmt.Sprintf("%s=%s", "jsonpath", "'{.status.state}'"), "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
err := tt.k.WaitForPackagesInstalled(tt.ctx, tt.cluster, "testpackage", "2m", "eksa-system")
tt.Expect(err).ToNot(HaveOccurred())
}
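
// TestWaitJSONPathLoop verifies the polling variant of the JSONPath wait, which
// repeatedly runs "kubectl get -o jsonpath=..." until the value matches, rather
// than relying on "kubectl wait --for=jsonpath" as WaitJSONPath does.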
func TestWaitJSONPathLoop(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"get", "packages.packages.eks.amazonaws.com/testpackage", "-o", fmt.Sprintf("%s=%s", "jsonpath", "'{.status.state}'"), "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
err := tt.k.WaitJSONPathLoop(tt.ctx, tt.cluster.KubeconfigFile, "2m", "status.state", "installed", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")
tt.Expect(err).ToNot(HaveOccurred())
tt.Expect(b.String()).ToNot(ContainSubstring("waiting 5 seconds...."))
}
func TestWaitJSONPathLoopTimeParseError(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"get", "packages.packages.eks.amazonaws.com/testpackage", "-o", fmt.Sprintf("%s=%s", "jsonpath", "'{.status.state}'"), "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitJSONPathLoop(tt.ctx, tt.cluster.KubeconfigFile, "", "status.state", "installed", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")).To(MatchError(ContainSubstring("unparsable timeout specified")))
}
func TestWaitJSONPathLoopNegativeTimeError(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"get", "packages.packages.eks.amazonaws.com/testpackage", "-o", fmt.Sprintf("%s=%s", "jsonpath", "'{.status.state}'"), "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitJSONPathLoop(tt.ctx, tt.cluster.KubeconfigFile, "-1m", "status.state", "installed", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")).To(MatchError(ContainSubstring("negative timeout specified")))
}
func TestWaitJSONPathLoopRetrierError(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("")
tt := newKubectlTest(t)
expectedParam := []string{"get", "packages.packages.eks.amazonaws.com/testpackage", "-o", fmt.Sprintf("%s=%s", "jsonpath", "'{.}'"), "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitJSONPathLoop(tt.ctx, tt.cluster.KubeconfigFile, "2m", "", "", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")).To(MatchError(ContainSubstring("executing wait")))
}
func TestWaitJSONPath(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"wait", "--timeout", "2m", fmt.Sprintf("--for=jsonpath='{.%s}'=%s", "status.state", "installed"), "packages.packages.eks.amazonaws.com/testpackage", "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
err := tt.k.WaitJSONPath(tt.ctx, tt.cluster.KubeconfigFile, "2m", "status.state", "installed", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")
tt.Expect(err).To(BeNil())
}
func TestWaitJSONPathTimeParseError(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"wait", "--timeout", "2m", fmt.Sprintf("--for=jsonpath='{.%s}'=%s", "status.state", "installed"), "packages.packages.eks.amazonaws.com/testpackage", "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitJSONPath(tt.ctx, tt.cluster.KubeconfigFile, "", "status.state", "installed", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")).To(MatchError(ContainSubstring("unparsable timeout specified")))
}
func TestWaitJSONPathNegativeTimeError(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"wait", "--timeout", "2m", fmt.Sprintf("--for=jsonpath='{.%s}'=%s", "status.state", "installed"), "packages.packages.eks.amazonaws.com/testpackage", "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitJSONPath(tt.ctx, tt.cluster.KubeconfigFile, "-1m", "status.state", "installed", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")).To(MatchError(ContainSubstring("negative timeout specified")))
}
func TestWaitJSONPathRetrierError(t *testing.T) {
t.Parallel()
var b bytes.Buffer
b.WriteString("'installed'")
tt := newKubectlTest(t)
expectedParam := []string{"wait", "--timeout", "2m", fmt.Sprintf("--for=jsonpath='{.%s}'=%s", "", ""), "packages.packages.eks.amazonaws.com/testpackage", "--kubeconfig", "c.kubeconfig", "-n", "eksa-system"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
tt.Expect(tt.k.WaitJSONPath(tt.ctx, tt.cluster.KubeconfigFile, "2m", "", "", "packages.packages.eks.amazonaws.com/testpackage", "eksa-system")).To(MatchError(ContainSubstring("executing wait")))
}
func TestGetPackageBundleController(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
testPbc := &packagesv1.PackageBundleController{}
respJSON, err := json.Marshal(testPbc)
if err != nil {
t.Fatalf("marshaling test PackageBundleController: %s", err)
}
ret := bytes.NewBuffer(respJSON)
expectedParam := []string{"get", "packagebundlecontroller.packages.eks.amazonaws.com", "testcluster", "-o", "json", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages", "--ignore-not-found=true"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(*ret, nil).AnyTimes()
if _, err := tt.k.GetPackageBundleController(tt.ctx, tt.cluster.KubeconfigFile, "testcluster"); err != nil {
t.Errorf("Kubectl.GetPackageBundleController() error = %v, want nil", err)
}
}
func TestGetPackageBundleList(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
testPbc := &packagesv1.PackageBundleList{
TypeMeta: metav1.TypeMeta{
Kind: "PackageBundle",
},
}
respJSON, err := json.Marshal(testPbc)
if err != nil {
t.Fatalf("marshaling test PackageBundleList: %s", err)
}
ret := bytes.NewBuffer(respJSON)
expectedParam := []string{"get", "packagebundles.packages.eks.amazonaws.com", "-o", "jsonpath='{.items}'", "--kubeconfig", "c.kubeconfig", "-n", "eksa-packages"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(*ret, nil).AnyTimes()
expectedParam = []string{"get", "packagebundles.packages.eks.amazonaws.com", "-o", "json", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages", "--ignore-not-found=true"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(*ret, nil).AnyTimes()
if _, err := tt.k.GetPackageBundleList(tt.ctx, tt.cluster.KubeconfigFile); err != nil {
t.Errorf("Kubectl.GetPackageBundleList() error = %v, want nil", err)
}
}
func TestRunBusyBoxPod(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
var b bytes.Buffer
expectedParam := []string{"run", "testpod-123", "--image=yauritux/busybox-curl", "-o", "json", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages", "--restart=Never", "pwd"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
if _, err := tt.k.RunBusyBoxPod(tt.ctx, "eksa-packages", "testpod-123", tt.cluster.KubeconfigFile, []string{"pwd"}); err != nil {
t.Errorf("Kubectl.RunBusyBoxPod() error = %v, want nil", err)
}
}
func TestGetPodNameByLabel(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
var b bytes.Buffer
expectedParam := []string{"get", "pod", "-l=app.kubernetes.io/name=aws-otel-collector", "-o=jsonpath='{.items[0].metadata.name}'", "--kubeconfig", "c.kubeconfig", "--namespace", "observability"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
if _, err := tt.k.GetPodNameByLabel(tt.ctx, "observability", "app.kubernetes.io/name=aws-otel-collector", tt.cluster.KubeconfigFile); err != nil {
t.Errorf("Kubectl.GetPodNameByLabel() error = %v, want nil", err)
}
}
func TestGetPodIP(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
var b bytes.Buffer
expectedParam := []string{"get", "pod", "generated-adot-75f769bc7-f7pmv", "-o=jsonpath='{.status.podIP}'", "--kubeconfig", "c.kubeconfig", "--namespace", "observability"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
if _, err := tt.k.GetPodIP(tt.ctx, "observability", "generated-adot-75f769bc7-f7pmv", tt.cluster.KubeconfigFile); err != nil {
t.Errorf("Kubectl.GetPodIP() error = %v, want nil", err)
}
}
func TestGetPodLogs(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
var b bytes.Buffer
expectedParam := []string{"logs", "testpod", "testcontainer", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages"}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
if _, err := tt.k.GetPodLogs(tt.ctx, "eksa-packages", "testpod", "testcontainer", tt.cluster.KubeconfigFile); err != nil {
t.Errorf("Kubectl.GetPodLogs() error = %v, want nil", err)
}
}
func TestGetPodLogsSince(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
var b bytes.Buffer
now := time.Now()
expectedParam := []string{"logs", "testpod", "testcontainer", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages", "--since-time", now.Format(time.RFC3339)}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
if _, err := tt.k.GetPodLogsSince(tt.ctx, "eksa-packages", "testpod", "testcontainer", tt.cluster.KubeconfigFile, now); err != nil {
t.Errorf("Kubectl.GetPodLogsSince() error = %v, want nil", err)
}
}
func TestGetPodLogsSinceInternalError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
now := time.Now()
var b bytes.Buffer
b.WriteString("Internal Error")
expectedParam := []string{"logs", "testpod", "testcontainer", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages", "--since-time", now.Format(time.RFC3339)}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, nil).AnyTimes()
_, err := tt.k.GetPodLogsSince(tt.ctx, "eksa-packages", "testpod", "testcontainer", tt.cluster.KubeconfigFile, now)
tt.Expect(err).To(MatchError(ContainSubstring("Internal Error")))
}
func TestGetPodLogsSinceExecuteError(t *testing.T) {
t.Parallel()
tt := newKubectlTest(t)
now := time.Now()
var b bytes.Buffer
b.WriteString("execute error")
expectedParam := []string{"logs", "testpod", "testcontainer", "--kubeconfig", "c.kubeconfig", "--namespace", "eksa-packages", "--since-time", now.Format(time.RFC3339)}
tt.e.EXPECT().Execute(gomock.Any(), gomock.Eq(expectedParam)).Return(b, fmt.Errorf("execute error")).AnyTimes()
_, err := tt.k.GetPodLogsSince(tt.ctx, "eksa-packages", "testpod", "testcontainer", tt.cluster.KubeconfigFile, now)
tt.Expect(err).To(MatchError(ContainSubstring("execute error")))
}
func TestKubectlSearchTinkerbellMachineConfig(t *testing.T) {
t.Parallel()
var kubeconfig, namespace, name string
buffer := bytes.Buffer{}
buffer.WriteString(test.ReadFile(t, "testdata/kubectl_no_cs_machineconfigs.json"))
k, ctx, _, e := newKubectl(t)
expectedParam := []string{
"get", fmt.Sprintf("tinkerbellmachineconfigs.%s", v1alpha1.GroupVersion.Group), "-o", "json", "--kubeconfig",
kubeconfig, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(buffer, nil)
mc, err := k.SearchTinkerbellMachineConfig(ctx, name, kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.SearchTinkerbellMachineConfig() error = %v, want nil", err)
}
if len(mc) > 0 {
t.Errorf("expected 0 machine configs, got %d", len(mc))
}
}
func TestKubectlSearchTinkerbellMachineConfigNotFound(t *testing.T) {
t.Parallel()
var kubeconfigfile string
tt := newKubectlTest(t)
params := []string{
"get", fmt.Sprintf("tinkerbellmachineconfigs.%s", v1alpha1.GroupVersion.Group), "-o", "json", "--kubeconfig",
kubeconfigfile, "--namespace", tt.namespace, "--field-selector=metadata.name=test",
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(""), errors.New("machineconfig not found"))
_, err := tt.k.SearchTinkerbellMachineConfig(tt.ctx, "test", kubeconfigfile, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestKubectlSearchTinkerbellDatacenterConfigs(t *testing.T) {
t.Parallel()
var kubeconfig, namespace, name string
buffer := bytes.Buffer{}
buffer.WriteString(test.ReadFile(t, "testdata/kubectl_no_cs_datacenterconfigs.json"))
k, ctx, _, e := newKubectl(t)
expectedParam := []string{
"get", fmt.Sprintf("tinkerbelldatacenterconfigs.%s", v1alpha1.GroupVersion.Group), "-o", "json", "--kubeconfig",
kubeconfig, "--namespace", namespace, "--field-selector=metadata.name=" + name,
}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(buffer, nil)
mc, err := k.SearchTinkerbellDatacenterConfig(ctx, name, kubeconfig, namespace)
if err != nil {
t.Errorf("Kubectl.SearchTinkerbellDatacenterConfig() error = %v, want nil", err)
}
if len(mc) > 0 {
t.Errorf("expected 0 datacenter configs, got %d", len(mc))
}
}
func TestKubectlSearchTinkerbellDatacenterConfigNotFound(t *testing.T) {
t.Parallel()
var kubeconfigfile string
tt := newKubectlTest(t)
params := []string{
"get", fmt.Sprintf("tinkerbelldatacenterconfigs.%s", v1alpha1.GroupVersion.Group), "-o", "json", "--kubeconfig",
kubeconfigfile, "--namespace", tt.namespace, "--field-selector=metadata.name=test",
}
tt.e.EXPECT().Execute(tt.ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(""), errors.New("datacenterconfig not found"))
_, err := tt.k.SearchTinkerbellDatacenterConfig(tt.ctx, "test", kubeconfigfile, tt.namespace)
tt.Expect(err).NotTo(BeNil())
}
func TestGetMachineDeploymentsForCluster(t *testing.T) {
t.Parallel()
k, ctx, _, e := newKubectl(t)
clusterName := "test0"
fileContent := test.ReadFile(t, "testdata/kubectl_machine_deployments.json")
wantMachineDeploymentNames := []string{
"test0-md-0",
"test1-md-0",
}
params := []string{
"get", fmt.Sprintf("machinedeployments.%s", clusterv1.GroupVersion.Group), "-o", "json",
fmt.Sprintf("--selector=cluster.x-k8s.io/cluster-name=%s", clusterName),
}
e.EXPECT().Execute(ctx, gomock.Eq(params)).Return(*bytes.NewBufferString(""), nil).Return(*bytes.NewBufferString(fileContent), nil)
gotMachineDeployments, err := k.GetMachineDeploymentsForCluster(ctx, clusterName)
if err != nil {
t.Fatalf("Kubectl.GetMachineDeploymentsForCluster() error = %v, want nil", err)
}
gotMachineDeploymentNames := make([]string, 0, len(gotMachineDeployments))
for _, p := range gotMachineDeployments {
gotMachineDeploymentNames = append(gotMachineDeploymentNames, p.Name)
}
if !reflect.DeepEqual(gotMachineDeploymentNames, wantMachineDeploymentNames) {
t.Fatalf("Kubectl.GetMachineDeployments() deployments = %+v, want %+v", gotMachineDeploymentNames, wantMachineDeploymentNames)
}
}
func TestKubectlHasCRD(t *testing.T) {
t.Parallel()
for _, tt := range []struct {
Name string
Error error
ExpectCRD bool
ExpectErr bool
}{
{
Name: "CRDPresent",
ExpectCRD: true,
},
{
Name: "CRDNotPresent",
Error: errors.New("NotFound"),
},
{
Name: "Error",
Error: errors.New("some error"),
ExpectErr: true,
},
} {
t.Run(tt.Name, func(t *testing.T) {
k, ctx, _, e := newKubectl(t)
const crd = "foo"
const kubeconfig = "kubeconfig"
var b bytes.Buffer
params := []string{"get", "customresourcedefinition", crd, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(params)).Return(b, tt.Error)
r, err := k.HasCRD(context.Background(), crd, kubeconfig)
if tt.ExpectErr && !strings.Contains(err.Error(), tt.Error.Error()) {
t.Fatalf("Expected error: %v; Received: %v", tt.ExpectErr, err)
}
if r != tt.ExpectCRD {
t.Fatalf("Expected: %v; Received: %v", tt.ExpectCRD, r)
}
})
}
}
func TestKubectlDeleteCRD(t *testing.T) {
t.Parallel()
//"delete", "crd", crd, "--kubeconfig", kubeconfig
for _, tt := range []struct {
Name string
Error error
ExpectErr bool
}{
{
Name: "CRDPresent",
},
{
Name: "CRDNotPresent",
Error: errors.New("NotFound"),
},
{
Name: "Error",
Error: errors.New("some error"),
ExpectErr: true,
},
} {
t.Run(tt.Name, func(t *testing.T) {
k, ctx, _, e := newKubectl(t)
const crd = "foo"
const kubeconfig = "kubeconfig"
var b bytes.Buffer
params := []string{"delete", "customresourcedefinition", crd, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(params)).Return(b, tt.Error)
err := k.DeleteCRD(context.Background(), crd, kubeconfig)
if tt.ExpectErr && !strings.Contains(err.Error(), tt.Error.Error()) {
t.Fatalf("Expected error: %v; Received: %v", tt.ExpectErr, err)
}
})
}
}
| 3,869 |
eks-anywhere | aws | Go | package executables
import "context"
type localExecutableBuilder struct{}
func newLocalExecutableBuilder() localExecutableBuilder {
return localExecutableBuilder{}
}
func (b localExecutableBuilder) Build(binaryPath string) Executable {
return NewExecutable(binaryPath)
}
func (b localExecutableBuilder) Init(_ context.Context) (Closer, error) {
return NoOpClose, nil
}
func NoOpClose(ctx context.Context) error {
return nil
}
| 22 |
eks-anywhere | aws | Go | package executables
import (
"bytes"
"context"
"fmt"
"strings"
"github.com/aws/eks-anywhere/pkg/logger"
)
const sonobuoyPath = "./sonobuoy"
type Sonobuoy struct {
Executable
}
func NewSonobuoy(executable Executable) *Sonobuoy {
return &Sonobuoy{
Executable: executable,
}
}
func (k *Sonobuoy) Run(ctx context.Context, contextName string, args ...string) (string, error) {
logger.Info("Starting sonobuoy tests")
executionArgs := []string{
"--context",
contextName,
"run",
"--mode=certified-conformance",
"--wait",
}
executionArgs = append(executionArgs, args...)
output, err := k.Execute(ctx, executionArgs...)
command := strings.Join(executionArgs, " ") + "\n"
if err != nil {
return command, fmt.Errorf("executing sonobuoy: %v", err)
}
return command + output.String(), err
}
func (k *Sonobuoy) GetResults(ctx context.Context, contextName string, args ...string) (string, error) {
executionArgs := []string{
"--context",
contextName,
"retrieve",
"./results",
}
output, err := k.Execute(ctx, executionArgs...)
if err != nil {
return "", fmt.Errorf("executing sonobuoy retrieve: %v", err)
}
outputFile := strings.TrimSpace(output.String())
logger.Info("Sonobuoy results file: " + outputFile)
executionArgs = []string{
"results",
outputFile,
}
output, err = k.Execute(ctx, executionArgs...)
command := strings.Join(executionArgs, " ") + "\n"
if err != nil {
return command, fmt.Errorf("executing sonobuoy results command: %v", err)
}
return command + output.String(), err
}
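// Illustrative sketch, not part of the original source: Run prepends the kube
// context before the subcommand, so for a hypothetical context name and no
// extra args the executed invocation is equivalent to
//
//	sonobuoy --context my-cluster run --mode=certified-conformance --wait
//
// with any caller-supplied args appended after --wait.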
| 68 |
eks-anywhere | aws | Go | package executables
import (
"context"
"fmt"
)
// SSH is an executable for running SSH commands.
type SSH struct {
Executable
}
const (
sshPath = "ssh"
strictHostCheckFlag = "StrictHostKeyChecking=no"
)
// NewSSH returns a new instance of SSH client.
func NewSSH(executable Executable) *SSH {
return &SSH{
Executable: executable,
}
}
// RunCommand runs a command on the host using SSH.
func (s *SSH) RunCommand(ctx context.Context, privateKeyPath, username, IP string, command ...string) (string, error) {
params := []string{
"-i", privateKeyPath,
"-o", strictHostCheckFlag,
fmt.Sprintf("%s@%s", username, IP),
}
params = append(params, command...)
out, err := s.Executable.Execute(ctx, params...)
if err != nil {
return "", fmt.Errorf("running SSH command: %v", err)
}
return out.String(), nil
}
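// Illustrative sketch, not part of the original source: for hypothetical
// inputs, RunCommand assembles a parameter slice equivalent to the shell
// invocation
//
//	ssh -i ./id_rsa -o StrictHostKeyChecking=no admin@10.0.0.1 uptime
//
// which is exactly the parameter shape the mock-based tests below assert on.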
| 41 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"errors"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
)
var (
privateKeyPath = "id_rsa"
username = "eksa-test"
ip = "1.2.3.4"
command = []string{"some", "random", "test", "command"}
)
func TestSSHRunCommandNoError(t *testing.T) {
ctx := context.Background()
g := NewWithT(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
ssh := executables.NewSSH(executable)
executable.EXPECT().Execute(ctx, "-i", privateKeyPath, "-o", "StrictHostKeyChecking=no", fmt.Sprintf("%s@%s", username, ip), "some", "random", "test", "command")
_, err := ssh.RunCommand(ctx, privateKeyPath, username, ip, command...)
g.Expect(err).To(Not(HaveOccurred()))
}
func TestSSHRunCommandError(t *testing.T) {
ctx := context.Background()
g := NewWithT(t)
mockCtrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(mockCtrl)
ssh := executables.NewSSH(executable)
errMsg := "sshKey invalid"
executable.EXPECT().Execute(ctx, "-i", privateKeyPath, "-o", "StrictHostKeyChecking=no", fmt.Sprintf("%s@%s", username, ip), "some", "random", "test", "command").Return(bytes.Buffer{}, errors.New(errMsg))
_, err := ssh.RunCommand(ctx, privateKeyPath, username, ip, command...)
g.Expect(err).To(MatchError(fmt.Sprintf("running SSH command: %s", errMsg)))
}
| 50 |
eks-anywhere | aws | Go | package executables
import "sync"
type syncSlice struct {
internal []string
sync.RWMutex
}
func newSyncSlice() *syncSlice {
return &syncSlice{
internal: []string{},
}
}
func (s *syncSlice) append(v ...string) {
s.Lock()
defer s.Unlock()
s.internal = append(s.internal, v...)
}
func (s *syncSlice) iterate() <-chan string {
c := make(chan string)
go func() {
s.RLock()
defer s.RUnlock()
defer close(c)
for _, v := range s.internal {
c <- v
}
}()
return c
}
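// exampleSyncSliceUsage is an illustrative sketch, not part of the original
// source. append is safe to call from multiple goroutines; iterate holds the
// read lock until its channel is fully drained, so a consumer must not call
// append from the same goroutine before draining, or it will deadlock.
func exampleSyncSliceUsage() []string {
	s := newSyncSlice()
	var wg sync.WaitGroup
	for _, v := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func(val string) {
			defer wg.Done()
			s.append(val)
		}(v)
	}
	wg.Wait()

	var out []string
	for v := range s.iterate() {
		out = append(out, v)
	}
	return out
}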
| 36 |
eks-anywhere | aws | Go | package executables
import (
"context"
"encoding/json"
"fmt"
"regexp"
"time"
)
const (
troubleshootPath = "support-bundle"
supportBundleArchiveRegex = `support-bundle-([0-9]+(-[0-9]+)+)T([0-9]+(_[0-9]+)+)\.tar\.gz`
)
type Troubleshoot struct {
Executable
}
func NewTroubleshoot(executable Executable) *Troubleshoot {
return &Troubleshoot{
Executable: executable,
}
}
func (t *Troubleshoot) Collect(ctx context.Context, bundlePath string, sinceTime *time.Time, kubeconfig string) (archivePath string, err error) {
marshalledTime, err := sinceTime.MarshalText()
if err != nil {
return "", fmt.Errorf("could not marshal sinceTime for Collect parameters: %v", err)
}
params := []string{bundlePath, "--kubeconfig", kubeconfig, "--interactive=false", "--since-time", string(marshalledTime)}
output, err := t.Execute(ctx, params...)
if err != nil {
return "", fmt.Errorf("executing support-bundle: %v", err)
}
archivePath, err = parseArchivePathFromCollectOutput(output.String())
if err != nil {
return "", fmt.Errorf("parsing support-bundle output: %v", err)
}
return archivePath, nil
}
func (t *Troubleshoot) Analyze(ctx context.Context, bundleSpecPath string, archivePath string) ([]*SupportBundleAnalysis, error) {
params := []string{"analyze", bundleSpecPath, "--bundle", archivePath, "--output", "json"}
output, err := t.Execute(ctx, params...)
if err != nil {
return nil, fmt.Errorf("analyzing support bundle %s with analyzers %s: %v", archivePath, bundleSpecPath, err)
}
var analysisOutput []*SupportBundleAnalysis
err = json.Unmarshal(output.Bytes(), &analysisOutput)
if err != nil {
return nil, fmt.Errorf("unmarshalling support-bundle analyze output: %v", err)
}
return analysisOutput, err
}
func parseArchivePathFromCollectOutput(tsLogs string) (archivePath string, err error) {
r, err := regexp.Compile(supportBundleArchiveRegex)
if err != nil {
return "", fmt.Errorf("parsing support-bundle output: %v", err)
}
archivePath = r.FindString(tsLogs)
if archivePath == "" {
return "", fmt.Errorf("parsing support-bundle output: could not find archive path in output")
}
return archivePath, nil
}
type SupportBundleAnalysis struct {
Title string `json:"title"`
IsPass bool `json:"isPass"`
IsFail bool `json:"isFail"`
IsWarn bool `json:"isWarn"`
Message string `json:"message"`
Uri string `json:"URI"`
}
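// exampleParseArchivePath is an illustrative sketch, not part of the original
// source. It shows the collect-output parsing on a log line shaped like the
// one the support-bundle CLI prints (the same archive name used in this
// package's tests).
func exampleParseArchivePath() (string, error) {
	logs := "wrote bundle to support-bundle-2021-09-17T16_22_45.tar.gz\n"
	// Returns "support-bundle-2021-09-17T16_22_45.tar.gz", nil.
	return parseArchivePathFromCollectOutput(logs)
}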
| 78 |
eks-anywhere | aws | Go | package executables_test
import (
"bytes"
"context"
"encoding/json"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
archivePath = "support-bundle-2021-09-17T16_22_45.tar.gz"
bundlePath = "./testBundleThatDoesNotExist.yaml"
sinceTimeString = "2021-09-17T16:22:45Z"
)
func TestTroubleshootCollectSuccess(t *testing.T) {
ts, ctx, cluster, e := newTroubleshoot(t)
sinceTime, err := time.Parse(time.RFC3339, sinceTimeString)
if err != nil {
t.Errorf("Troubleshoot.Collect() error: failed to parse time: %v", err)
}
expectedParams := []string{bundlePath, "--kubeconfig", cluster.KubeconfigFile, "--interactive=false", "--since-time", sinceTimeString}
returnBuffer := bytes.Buffer{}
returnBuffer.Write([]byte(archivePath))
e.EXPECT().Execute(ctx, gomock.Eq(expectedParams)).Return(returnBuffer, nil)
if _, err := ts.Collect(ctx, bundlePath, &sinceTime, cluster.KubeconfigFile); err != nil {
t.Errorf("Troubleshoot.Collect() error = %v, want nil", err)
}
}
func TestTroubleshootAnalyzeSuccess(t *testing.T) {
ts, ctx, _, e := newTroubleshoot(t)
var returnValues []*executables.SupportBundleAnalysis
returnValues = append(returnValues, &executables.SupportBundleAnalysis{})
returnJson, err := json.Marshal(returnValues)
if err != nil {
return
}
returnBuffer := bytes.Buffer{}
returnBuffer.Write(returnJson)
expectedParams := []string{"analyze", bundlePath, "--bundle", archivePath, "--output", "json"}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParams)).Return(returnBuffer, nil)
if _, err := ts.Analyze(ctx, bundlePath, archivePath); err != nil {
t.Errorf("Troubleshoot.Analyze() error = %v, want nil", err)
}
}
func newTroubleshoot(t *testing.T) (*executables.Troubleshoot, context.Context, *types.Cluster, *mockexecutables.MockExecutable) {
kubeconfigFile := "c.kubeconfig"
cluster := &types.Cluster{
KubeconfigFile: kubeconfigFile,
Name: "test-cluster",
}
ctx := context.Background()
ctrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(ctrl)
return executables.NewTroubleshoot(executable), ctx, cluster, executable
}
| 69 |
eks-anywhere | aws | Go | package cmk
import (
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
// Builder serves as an interface wrapper to wrap the executablesBuilder without coupling consumers with its logic.
type Builder struct {
builder *executables.ExecutablesBuilder
}
// NewCmkBuilder initializes the cmk executable builder.
func NewCmkBuilder(builder *executables.ExecutablesBuilder) *Builder {
return &Builder{builder: builder}
}
// BuildCloudstackClient exposes a single method to consumers to abstract away executableBuilder's other operations and business logic.
func (b *Builder) BuildCloudstackClient(writer filewriter.FileWriter, config *decoder.CloudStackExecConfig) (cloudstack.ProviderCmkClient, error) {
return b.builder.BuildCmkExecutable(writer, config)
}
| 24 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/executables (interfaces: Executable,DockerClient,DockerContainer)
// Package mocks is a generated GoMock package.
package mocks
import (
bytes "bytes"
context "context"
reflect "reflect"
executables "github.com/aws/eks-anywhere/pkg/executables"
gomock "github.com/golang/mock/gomock"
)
// MockExecutable is a mock of Executable interface.
type MockExecutable struct {
ctrl *gomock.Controller
recorder *MockExecutableMockRecorder
}
// MockExecutableMockRecorder is the mock recorder for MockExecutable.
type MockExecutableMockRecorder struct {
mock *MockExecutable
}
// NewMockExecutable creates a new mock instance.
func NewMockExecutable(ctrl *gomock.Controller) *MockExecutable {
mock := &MockExecutable{ctrl: ctrl}
mock.recorder = &MockExecutableMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockExecutable) EXPECT() *MockExecutableMockRecorder {
return m.recorder
}
// Command mocks base method.
func (m *MockExecutable) Command(arg0 context.Context, arg1 ...string) *executables.Command {
m.ctrl.T.Helper()
varargs := []interface{}{arg0}
for _, a := range arg1 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Command", varargs...)
ret0, _ := ret[0].(*executables.Command)
return ret0
}
// Command indicates an expected call of Command.
func (mr *MockExecutableMockRecorder) Command(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0}, arg1...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Command", reflect.TypeOf((*MockExecutable)(nil).Command), varargs...)
}
// Execute mocks base method.
func (m *MockExecutable) Execute(arg0 context.Context, arg1 ...string) (bytes.Buffer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0}
for _, a := range arg1 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Execute", varargs...)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Execute indicates an expected call of Execute.
func (mr *MockExecutableMockRecorder) Execute(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0}, arg1...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockExecutable)(nil).Execute), varargs...)
}
// ExecuteWithEnv mocks base method.
func (m *MockExecutable) ExecuteWithEnv(arg0 context.Context, arg1 map[string]string, arg2 ...string) (bytes.Buffer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ExecuteWithEnv", varargs...)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExecuteWithEnv indicates an expected call of ExecuteWithEnv.
func (mr *MockExecutableMockRecorder) ExecuteWithEnv(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteWithEnv", reflect.TypeOf((*MockExecutable)(nil).ExecuteWithEnv), varargs...)
}
// ExecuteWithStdin mocks base method.
func (m *MockExecutable) ExecuteWithStdin(arg0 context.Context, arg1 []byte, arg2 ...string) (bytes.Buffer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ExecuteWithStdin", varargs...)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExecuteWithStdin indicates an expected call of ExecuteWithStdin.
func (mr *MockExecutableMockRecorder) ExecuteWithStdin(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteWithStdin", reflect.TypeOf((*MockExecutable)(nil).ExecuteWithStdin), varargs...)
}
// Run mocks base method.
func (m *MockExecutable) Run(arg0 *executables.Command) (bytes.Buffer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Run", arg0)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Run indicates an expected call of Run.
func (mr *MockExecutableMockRecorder) Run(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockExecutable)(nil).Run), arg0)
}
// MockDockerClient is a mock of DockerClient interface.
type MockDockerClient struct {
ctrl *gomock.Controller
recorder *MockDockerClientMockRecorder
}
// MockDockerClientMockRecorder is the mock recorder for MockDockerClient.
type MockDockerClientMockRecorder struct {
mock *MockDockerClient
}
// NewMockDockerClient creates a new mock instance.
func NewMockDockerClient(ctrl *gomock.Controller) *MockDockerClient {
mock := &MockDockerClient{ctrl: ctrl}
mock.recorder = &MockDockerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDockerClient) EXPECT() *MockDockerClientMockRecorder {
return m.recorder
}
// Execute mocks base method.
func (m *MockDockerClient) Execute(arg0 context.Context, arg1 ...string) (bytes.Buffer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0}
for _, a := range arg1 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Execute", varargs...)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Execute indicates an expected call of Execute.
func (mr *MockDockerClientMockRecorder) Execute(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0}, arg1...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockDockerClient)(nil).Execute), varargs...)
}
// Login mocks base method.
func (m *MockDockerClient) Login(arg0 context.Context, arg1, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Login", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Login indicates an expected call of Login.
func (mr *MockDockerClientMockRecorder) Login(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Login", reflect.TypeOf((*MockDockerClient)(nil).Login), arg0, arg1, arg2, arg3)
}
// PullImage mocks base method.
func (m *MockDockerClient) PullImage(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullImage", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// PullImage indicates an expected call of PullImage.
func (mr *MockDockerClientMockRecorder) PullImage(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockDockerClient)(nil).PullImage), arg0, arg1)
}
// MockDockerContainer is a mock of DockerContainer interface.
type MockDockerContainer struct {
ctrl *gomock.Controller
recorder *MockDockerContainerMockRecorder
}
// MockDockerContainerMockRecorder is the mock recorder for MockDockerContainer.
type MockDockerContainerMockRecorder struct {
mock *MockDockerContainer
}
// NewMockDockerContainer creates a new mock instance.
func NewMockDockerContainer(ctrl *gomock.Controller) *MockDockerContainer {
mock := &MockDockerContainer{ctrl: ctrl}
mock.recorder = &MockDockerContainerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDockerContainer) EXPECT() *MockDockerContainerMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockDockerContainer) Close(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close.
func (mr *MockDockerContainerMockRecorder) Close(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDockerContainer)(nil).Close), arg0)
}
// ContainerName mocks base method.
func (m *MockDockerContainer) ContainerName() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerName")
ret0, _ := ret[0].(string)
return ret0
}
// ContainerName indicates an expected call of ContainerName.
func (mr *MockDockerContainerMockRecorder) ContainerName() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerName", reflect.TypeOf((*MockDockerContainer)(nil).ContainerName))
}
// Init mocks base method.
func (m *MockDockerContainer) Init(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Init indicates an expected call of Init.
func (mr *MockDockerContainerMockRecorder) Init(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockDockerContainer)(nil).Init), arg0)
}
| 268 |
eks-anywhere | aws | Go | package features
// These are environment variables used as flags to enable/disable features.
const (
CloudStackKubeVipDisabledEnvVar = "CLOUDSTACK_KUBE_VIP_DISABLED"
FullLifecycleAPIEnvVar = "FULL_LIFECYCLE_API"
FullLifecycleGate = "FullLifecycleAPI"
CheckpointEnabledEnvVar = "CHECKPOINT_ENABLED"
UseNewWorkflowsEnvVar = "USE_NEW_WORKFLOWS"
ExperimentalSelfManagedClusterUpgradeEnvVar = "EXP_SELF_MANAGED_API_UPGRADE"
experimentalSelfManagedClusterUpgradeGate = "ExpSelfManagedAPIUpgrade"
)
func FeedGates(featureGates []string) {
globalFeatures.feedGates(featureGates)
}
type Feature struct {
Name string
IsActive func() bool
}
func IsActive(feature Feature) bool {
return feature.IsActive()
}
// ClearCache is mainly used for unit tests as of now.
func ClearCache() {
globalFeatures.clearCache()
}
func FullLifecycleAPI() Feature {
return Feature{
Name: "Full lifecycle API support through the EKS-A controller",
IsActive: globalFeatures.isActiveForEnvVarOrGate(FullLifecycleAPIEnvVar, FullLifecycleGate),
}
}
// ExperimentalSelfManagedClusterUpgrade allows self managed cluster upgrades through the API.
func ExperimentalSelfManagedClusterUpgrade() Feature {
return Feature{
Name: "[EXPERIMENTAL] Upgrade self-managed clusters through the API",
IsActive: globalFeatures.isActiveForEnvVarOrGate(
ExperimentalSelfManagedClusterUpgradeEnvVar,
experimentalSelfManagedClusterUpgradeGate,
),
}
}
func CloudStackKubeVipDisabled() Feature {
return Feature{
Name: "Kube-vip support disabled in CloudStack provider",
IsActive: globalFeatures.isActiveForEnvVar(CloudStackKubeVipDisabledEnvVar),
}
}
func CheckpointEnabled() Feature {
return Feature{
Name: "Checkpoint to rerun commands enabled",
IsActive: globalFeatures.isActiveForEnvVar(CheckpointEnabledEnvVar),
}
}
func UseNewWorkflows() Feature {
return Feature{
Name: "Use new workflow logic for cluster management operations",
IsActive: globalFeatures.isActiveForEnvVar(UseNewWorkflowsEnvVar),
}
}
| 71 |
eks-anywhere | aws | Go | package features
import (
"fmt"
"os"
"sync"
"testing"
. "github.com/onsi/gomega"
)
const (
fakeFeatureEnvVar = "fakeFeatureEnvVar"
fakeFeatureGate = "fakeFeatureGate"
)
func fakeFeature() Feature {
return Feature{
Name: "Core components upgrade",
IsActive: globalFeatures.isActiveForEnvVar(fakeFeatureEnvVar),
}
}
func fakeFeatureWithGate() Feature {
return Feature{
Name: "Core components upgrade",
IsActive: globalFeatures.isActiveForEnvVarOrGate(fakeFeatureEnvVar, fakeFeatureGate),
}
}
func setupContext(t *testing.T) {
t.Cleanup(func() {
// cleanup cache
globalFeatures.cache = newMutexMap()
globalFeatures.initGates = sync.Once{}
})
}
func TestIsActiveEnvVarUnset(t *testing.T) {
g := NewWithT(t)
setupContext(t)
os.Unsetenv(fakeFeatureEnvVar)
g.Expect(IsActive(fakeFeature())).To(BeFalse())
}
func TestIsActiveEnvVarSetFalse(t *testing.T) {
g := NewWithT(t)
setupContext(t)
t.Setenv(fakeFeatureEnvVar, "false")
g.Expect(IsActive(fakeFeature())).To(BeFalse())
}
func TestIsActiveEnvVarSetTrue(t *testing.T) {
g := NewWithT(t)
setupContext(t)
t.Setenv(fakeFeatureEnvVar, "true")
g.Expect(IsActive(fakeFeature())).To(BeTrue())
}
func TestIsActiveWithFeatureGatesTrue(t *testing.T) {
g := NewWithT(t)
setupContext(t)
featureGates := []string{"gate1=", "gate2=false", fmt.Sprintf("%s=true", fakeFeatureGate), ""}
FeedGates(featureGates)
g.Expect(IsActive(fakeFeatureWithGate())).To(BeTrue())
}
| 72 |
eks-anywhere | aws | Go | package features
import (
"os"
"strings"
"sync"
)
var globalFeatures = newFeatures()
type features struct {
cache *mutexMap
gates map[string]string
initGates sync.Once
}
func newFeatures() *features {
return &features{
cache: newMutexMap(),
gates: map[string]string{},
}
}
func (f *features) feedGates(featureGates []string) {
f.initGates.Do(func() {
for _, gate := range featureGates {
pairs := strings.SplitN(gate, "=", 2)
if len(pairs) != 2 {
continue
}
f.gates[pairs[0]] = pairs[1]
}
})
}
func (f *features) isActiveForEnvVar(envVar string) func() bool {
return func() bool {
active, ok := f.cache.load(envVar)
if !ok {
active = os.Getenv(envVar) == "true"
f.cache.store(envVar, active)
}
return active
}
}
func (f *features) isActiveForEnvVarOrGate(envVar, gateName string) func() bool {
return func() bool {
active, ok := f.cache.load(envVar)
if !ok {
value, present := os.LookupEnv(envVar)
if !present {
value = f.gates[gateName]
}
active = value == "true"
f.cache.store(envVar, active)
}
return active
}
}
func (f *features) clearCache() {
f.cache.clear()
}
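// exampleGatePrecedence is an illustrative sketch, not part of the original
// source; the env var and gate names are hypothetical. It shows the lookup
// order in isActiveForEnvVarOrGate: an explicitly set environment variable
// wins, the fed gate value is the fallback, and the first result is cached
// per env var name until clearCache is called.
func exampleGatePrecedence() bool {
	f := newFeatures()
	f.feedGates([]string{"MyGate=true"})
	// Assuming MY_FEATURE_ENV_VAR is unset, the gate value "true" activates
	// the feature, and the result is cached under "MY_FEATURE_ENV_VAR".
	return f.isActiveForEnvVarOrGate("MY_FEATURE_ENV_VAR", "MyGate")()
}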
| 69 |
eks-anywhere | aws | Go | package features
import "sync"
func newMutexMap() *mutexMap {
return &mutexMap{
internal: make(map[string]bool),
}
}
type mutexMap struct {
internal map[string]bool
sync.RWMutex
}
func (m *mutexMap) load(key string) (value bool, ok bool) {
m.RLock()
result, ok := m.internal[key]
m.RUnlock()
return result, ok
}
func (m *mutexMap) store(key string, value bool) {
m.Lock()
m.internal[key] = value
m.Unlock()
}
func (m *mutexMap) clear() {
m.Lock()
m.internal = make(map[string]bool)
m.Unlock()
}
| 34 |
eks-anywhere | aws | Go | package features
import (
"testing"
. "github.com/onsi/gomega"
)
func TestMutexMapLoadAndStore(t *testing.T) {
g := NewWithT(t)
m := newMutexMap()
key := "key"
value := true
v, ok := m.load(key)
g.Expect(ok).To(BeFalse())
g.Expect(v).To(BeFalse())
m.store(key, value)
v, ok = m.load(key)
g.Expect(ok).To(BeTrue())
g.Expect(v).To(Equal(value))
}
func TestMutexMapClear(t *testing.T) {
g := NewWithT(t)
m := newMutexMap()
key := "key"
value := true
m.store(key, value)
v, ok := m.load(key)
g.Expect(ok).To(BeTrue())
g.Expect(v).To(Equal(value))
m.clear()
v, ok = m.load(key)
g.Expect(ok).To(BeFalse())
g.Expect(v).To(BeFalse())
}
| 41 |
eks-anywhere | aws | Go | //go:build files_embed_fs
// +build files_embed_fs
package files
import "embed"
// embedFS is used to conditionally embed files in the binary. Only one
// of embed_config.go and empty_embed.go will be used, depending on the
// `files_embed_fs` build tag.
//
//go:embed config
var embedFS embed.FS
| 14 |
eks-anywhere | aws | Go | //go:build !files_embed_fs
// +build !files_embed_fs
package files
import "embed"
// embedFS is used to conditionally embed files in the binary. Only one
// of embed_config.go and empty_embed.go will be used, depending on the
// `files_embed_fs` build tag.
var embedFS embed.FS
| 12 |
eks-anywhere | aws | Go | package files
import (
"crypto/tls"
"crypto/x509"
"embed"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
"golang.org/x/net/http/httpproxy"
)
const (
httpsScheme = "https"
embedScheme = "embed"
)
type Reader struct {
embedFS embed.FS
httpClient *http.Client
userAgent string
}
type ReaderOpt func(*Reader)
func WithEmbedFS(embedFS embed.FS) ReaderOpt {
return func(s *Reader) {
s.embedFS = embedFS
}
}
func WithUserAgent(userAgent string) ReaderOpt {
return func(s *Reader) {
s.userAgent = userAgent
}
}
// WithEKSAUserAgent sets the user agent for a particular eks-a component and version.
// component should be something like "cli", "controller", "e2e", etc.
// version should generally be a semver, but when not available, any string is valid.
func WithEKSAUserAgent(eksAComponent, version string) ReaderOpt {
return WithUserAgent(eksaUserAgent(eksAComponent, version))
}
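// For example (matching this package's unit tests), a reader built with
// WithEKSAUserAgent("cli", "v0.10.0") sends requests with the header
// User-Agent: eks-a-cli/v0.10.0.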
// WithRootCACerts configures the HTTP client's trusted CAs. Note that this will overwrite
// the defaults so the host's trust will be ignored. This option is only for testing.
func WithRootCACerts(certs []*x509.Certificate) ReaderOpt {
return func(r *Reader) {
t := r.httpClient.Transport.(*http.Transport)
if t.TLSClientConfig == nil {
t.TLSClientConfig = &tls.Config{}
}
if t.TLSClientConfig.RootCAs == nil {
t.TLSClientConfig.RootCAs = x509.NewCertPool()
}
for _, c := range certs {
t.TLSClientConfig.RootCAs.AddCert(c)
}
}
}
// WithNonCachedProxyConfig configures the HTTP client to read the Proxy configuration
// from the environment on every request instead of relying on the default package
// level cache (implemented in the http package with envProxyFuncValue), which is only
// read once. If Proxy is not configured in the client's transport, nothing is changed.
// This is only for testing.
func WithNonCachedProxyConfig() ReaderOpt {
return func(r *Reader) {
t := r.httpClient.Transport.(*http.Transport)
if t.Proxy == nil {
return
}
t.Proxy = func(r *http.Request) (*url.URL, error) {
return httpproxy.FromEnvironment().ProxyFunc()(r.URL)
}
}
}
func NewReader(opts ...ReaderOpt) *Reader {
// In order to modify the TLSHandshakeTimeout we first clone the default transport.
// It has some defaults that we want to preserve. In particular Proxy, which is set
// to http.ProxyFromEnvironment. This will make the client honor the HTTP_PROXY,
// HTTPS_PROXY and NO_PROXY env variables.
transport := http.DefaultTransport.(*http.Transport).Clone()
transport.TLSHandshakeTimeout = 60 * time.Second
client := &http.Client{
Transport: transport,
}
r := &Reader{
embedFS: embedFS,
httpClient: client,
userAgent: eksaUserAgent("unknown", "no-version"),
}
for _, o := range opts {
o(r)
}
return r
}
func (r *Reader) ReadFile(uri string) ([]byte, error) {
url, err := url.Parse(uri)
if err != nil {
return nil, fmt.Errorf("can't build cluster spec, invalid release manifest url: %v", err)
}
switch url.Scheme {
case httpsScheme:
return r.readHttpFile(uri)
case embedScheme:
return r.readEmbedFile(url)
default:
return readLocalFile(uri)
}
}
func (r *Reader) readHttpFile(uri string) ([]byte, error) {
request, err := http.NewRequest("GET", uri, nil)
if err != nil {
return nil, fmt.Errorf("failed creating http GET request for downloading file: %v", err)
}
request.Header.Set("User-Agent", r.userAgent)
resp, err := r.httpClient.Do(request)
if err != nil {
return nil, fmt.Errorf("failed reading file from url [%s]: %v", uri, err)
}
defer resp.Body.Close()
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed reading file from url [%s]: %v", uri, err)
}
return data, nil
}
func (r *Reader) readEmbedFile(url *url.URL) ([]byte, error) {
data, err := r.embedFS.ReadFile(strings.TrimPrefix(url.Path, "/"))
if err != nil {
return nil, fmt.Errorf("failed reading embed file [%s] for cluster spec: %v", url.Path, err)
}
return data, nil
}
func readLocalFile(filename string) ([]byte, error) {
data, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("failed reading local file [%s]: %v", filename, err)
}
return data, nil
}
func eksaUserAgent(eksAComponent, version string) string {
return fmt.Sprintf("eks-a-%s/%s", eksAComponent, version)
}
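// exampleReadBySchemes is an illustrative sketch, not part of the original
// source; the URIs are hypothetical. ReadFile dispatches on the URI scheme:
// "https" goes through the configured HTTP client, "embed" reads from the
// embedded FS (with the leading slash trimmed from the path), and anything
// else falls through to the local filesystem.
func exampleReadBySchemes(r *Reader) error {
	for _, uri := range []string{
		"https://example.com/manifest.yaml",
		"embed:///config/manifest.yaml",
		"testdata/manifest.yaml",
	} {
		if _, err := r.ReadFile(uri); err != nil {
			return err
		}
	}
	return nil
}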
| 169 |
eks-anywhere | aws | Go | package files_test
import (
"crypto/x509"
"embed"
"io"
"net"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
"time"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/files"
)
//go:embed testdata
var testdataFS embed.FS
func TestReaderReadFileError(t *testing.T) {
tests := []struct {
testName string
uri string
filePath string
}{
{
testName: "missing local file",
uri: "fake-local-file.yaml",
},
{
testName: "missing embed file",
uri: "embed:///fake-local-file.yaml",
},
{
testName: "invalid uri",
uri: ":domain.com/",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
r := files.NewReader()
_, err := r.ReadFile(tt.uri)
g.Expect(err).NotTo(BeNil())
})
}
}
func TestReaderReadFileSuccess(t *testing.T) {
tests := []struct {
testName string
uri string
filePath string
}{
{
testName: "local file",
uri: "testdata/file.yaml",
filePath: "testdata/file.yaml",
},
{
testName: "embed file",
uri: "embed:///testdata/file.yaml",
filePath: "testdata/file.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
r := files.NewReader(files.WithEmbedFS(testdataFS))
got, err := r.ReadFile(tt.uri)
g.Expect(err).To(BeNil())
test.AssertContentToFile(t, string(got), tt.filePath)
})
}
}
func TestReaderReadFileHTTPSSuccess(t *testing.T) {
g := NewWithT(t)
filePath := "testdata/file.yaml"
server := test.NewHTTPSServerForFile(t, filePath)
uri := server.URL + "/" + filePath
r := files.NewReader(files.WithRootCACerts(serverCerts(g, server)))
got, err := r.ReadFile(uri)
g.Expect(err).To(BeNil())
test.AssertContentToFile(t, string(got), filePath)
}
func TestReaderReadFileHTTPSProxySuccess(t *testing.T) {
t.Run("prepapre", func(t *testing.T) {
g := NewWithT(t)
filePath := "testdata/file.yaml"
server := test.NewHTTPSServerForFile(t, filePath)
uri := server.URL + "/" + filePath
r := files.NewReader(files.WithRootCACerts(serverCerts(g, server)))
got, err := r.ReadFile(uri)
g.Expect(err).To(BeNil())
test.AssertContentToFile(t, string(got), filePath)
})
g := NewWithT(t)
filePath := "testdata/file.yaml"
// It's important to use example.com because the certificate created for
// the TLS server is only valid for this domain and 127.0.0.1.
fakeServerHost := "example.com:443"
fileURL := "https://" + fakeServerHost + "/" + filePath
server := test.NewHTTPSServerForFile(t, filePath)
serverHost := serverHost(g, server)
// We need the proxy server to perform a host "swap".
// The test server created by NewHTTPSServerForFile will be listening on
// 127.0.0.1. However, the Go documentation for the transport.Proxy states that:
// > if req.URL.Host is "localhost" or a loopback address (with or without
// > a port number), then a nil URL and nil error will be returned.
// https://pkg.go.dev/golang.org/x/net/http/httpproxy#Config.ProxyFunc
// Which means that it will never honor the HTTPS_PROXY env var since our
// request will be pointing to a loopback address.
// In order to make it work, we pass example.com in our request and use the
// proxy to map this domain to 127.0.0.1, where our file server is listening.
hostMappings := map[string]string{fakeServerHost: serverHost}
proxy := newProxyServer(t, hostMappings)
t.Setenv("HTTPS_PROXY", proxy.URL)
r := files.NewReader(
files.WithRootCACerts(serverCerts(g, server)),
files.WithNonCachedProxyConfig(),
)
got, err := r.ReadFile(fileURL)
g.Expect(err).To(BeNil())
test.AssertContentToFile(t, string(got), filePath)
g.Expect(proxy.countForHost(serverHost)).To(
Equal(1), "Host %s should have been proxied exactly once", serverHost,
)
}
func serverCerts(g Gomega, server *httptest.Server) []*x509.Certificate {
certs := []*x509.Certificate{}
for _, c := range server.TLS.Certificates {
roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1])
g.Expect(err).NotTo(HaveOccurred())
certs = append(certs, roots...)
}
return certs
}
func serverHost(g Gomega, server *httptest.Server) string {
u, err := url.Parse(server.URL)
g.Expect(err).NotTo(HaveOccurred())
return u.Host
}
type proxyServer struct {
*httptest.Server
*proxy
}
func newProxyServer(tb testing.TB, hostMappings map[string]string) *proxyServer {
proxyServer := &proxyServer{
proxy: newProxy(hostMappings),
}
proxyServer.Server = httptest.NewServer(http.HandlerFunc(proxyServer.handleProxy))
tb.Cleanup(func() {
proxyServer.Close()
})
return proxyServer
}
type proxy struct {
sync.Mutex
// proxied maintains a count of how many proxied requests
// have been completed per host.
proxied map[string]int
// hostMappings allows to map the dst host in the CONNECT
// request to a different host.
hostMappings map[string]string
}
func newProxy(hostMappings map[string]string) *proxy {
return &proxy{
proxied: map[string]int{},
hostMappings: hostMappings,
}
}
func (p *proxy) handleProxy(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodConnect {
p.tunnelConnection(w, r)
} else {
http.Error(w, "Only supports CONNECT", http.StatusMethodNotAllowed)
}
}
func (p *proxy) tunnelConnection(w http.ResponseWriter, r *http.Request) {
host := r.Host
if mappedDstHost, ok := p.hostMappings[host]; ok {
host = mappedDstHost
}
destConn, err := net.DialTimeout("tcp", host, 2*time.Second)
if err != nil {
http.Error(w, err.Error(), http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
h, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "Hijacking is not supported", http.StatusInternalServerError)
return
}
clientConn, _, err := h.Hijack()
if err != nil {
http.Error(w, err.Error(), http.StatusServiceUnavailable)
}
p.countRequest(host)
go pipe(w, destConn, clientConn)
go pipe(w, clientConn, destConn)
}
// countRequest increases the proxied counter for the given host.
func (p *proxy) countRequest(host string) {
p.Lock()
defer p.Unlock()
p.proxied[host] = p.proxied[host] + 1
}
// countForHost returns the number of times a particular host has been proxied.
func (p *proxy) countForHost(host string) int {
p.Lock()
defer p.Unlock()
return p.proxied[host]
}
func pipe(w http.ResponseWriter, destination io.WriteCloser, source io.ReadCloser) {
defer destination.Close()
defer source.Close()
if _, err := io.Copy(destination, source); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
| 260 |
eks-anywhere | aws | Go | package files
import (
"testing"
. "github.com/onsi/gomega"
)
func TestWithEKSAUserAgent(t *testing.T) {
g := NewWithT(t)
r := NewReader(WithEKSAUserAgent("cli", "v0.10.0"))
g.Expect(r.userAgent).To(Equal("eks-a-cli/v0.10.0"))
}
| 14 |
eks-anywhere | aws | Go | package filewriter
import (
"io"
"os"
)
type FileWriter interface {
Write(fileName string, content []byte, f ...FileOptionsFunc) (path string, err error)
WithDir(dir string) (FileWriter, error)
CleanUp()
CleanUpTemp()
Dir() string
TempDir() string
Create(name string, f ...FileOptionsFunc) (_ io.WriteCloser, path string, _ error)
}
type FileOptions struct {
IsTemp bool
Permissions os.FileMode
}
type FileOptionsFunc func(op *FileOptions)
| 24 |
eks-anywhere | aws | Go | package filewriter
import (
"os"
)
const DefaultTmpFolder = "generated"
func defaultFileOptions() *FileOptions {
return &FileOptions{true, os.ModePerm}
}
func Permission0600(op *FileOptions) {
op.Permissions = 0o600
}
func PersistentFile(op *FileOptions) {
op.IsTemp = false
}
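// exampleFileOptions is an illustrative sketch, not part of the original
// source: option funcs mutate a single FileOptions value in order, so this
// combination yields a persistent (non-temp) file written with 0600
// permissions, regardless of the defaults above.
func exampleFileOptions() *FileOptions {
	op := defaultFileOptions()
	for _, fn := range []FileOptionsFunc{PersistentFile, Permission0600} {
		fn(op)
	}
	return op // IsTemp == false, Permissions == 0o600
}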
| 20 |
eks-anywhere | aws | Go | package filewriter_test
import (
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/aws/eks-anywhere/pkg/filewriter"
)
func TestTmpWriterWriteValid(t *testing.T) {
folder := "tmp_folder"
folder2 := "tmp_folder_2"
err := os.MkdirAll(folder2, os.ModePerm)
if err != nil {
t.Fatalf("error setting up test: %v", err)
}
defer os.RemoveAll(folder)
defer os.RemoveAll(folder2)
tests := []struct {
testName string
dir string
fileName string
content []byte
}{
{
testName: "dir doesn't exist",
dir: folder,
fileName: "TestTmpWriterWriteValid-success.yaml",
content: []byte(`
fake content
blablab
`),
},
{
testName: "dir exists",
dir: folder2,
fileName: "test",
content: []byte(`
fake content
blablab
`),
},
{
testName: "empty file name",
dir: folder,
fileName: "test",
content: []byte(`
fake content
blablab
`),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tr, err := filewriter.NewWriter(tt.dir)
if err != nil {
t.Fatalf("failed creating tmpWriter error = %v", err)
}
gotPath, err := tr.Write(tt.fileName, tt.content)
if err != nil {
t.Fatalf("tmpWriter.Write() error = %v", err)
}
if !strings.HasPrefix(gotPath, tt.dir) {
t.Errorf("tmpWriter.Write() = %v, want to start with %v", gotPath, tt.dir)
}
if !strings.HasSuffix(gotPath, tt.fileName) {
t.Errorf("tmpWriter.Write() = %v, want to end with %v", gotPath, tt.fileName)
}
content, err := os.ReadFile(gotPath)
if err != nil {
t.Fatalf("error reading written file: %v", err)
}
if string(content) != string(tt.content) {
t.Errorf("Write file content = %v, want %v", content, tt.content)
}
})
}
}
func TestTmpWriterWithDir(t *testing.T) {
rootFolder := "folder_root"
subFolder := "subFolder"
defer os.RemoveAll(rootFolder)
tr, err := filewriter.NewWriter(rootFolder)
if err != nil {
t.Fatalf("failed creating tmpWriter error = %v", err)
}
tr, err = tr.WithDir(subFolder)
if err != nil {
t.Fatalf("failed creating tmpWriter with subdir error = %v", err)
}
gotPath, err := tr.Write("file.txt", []byte("file content"))
if err != nil {
t.Fatalf("tmpWriter.Write() error = %v", err)
}
wantPathPrefix := filepath.Join(rootFolder, subFolder)
if !strings.HasPrefix(gotPath, wantPathPrefix) {
t.Errorf("tmpWriter.Write() = %v, want to start with %v", gotPath, wantPathPrefix)
}
}
func TestCreate(t *testing.T) {
dir := t.TempDir()
const fileName = "test.txt"
// Hard code the "generated". Its an implementation detail but we can't refactor it right now.
expectedPath := path.Join(dir, "generated", fileName)
expectedContent := []byte("test content")
fr, err := filewriter.NewWriter(dir)
if err != nil {
t.Fatal(err)
}
fh, path, err := fr.Create(fileName)
if err != nil {
t.Fatal(err)
}
// We need to validate 2 things: (1) are the paths returned correct; (2) if we write content
// to the returned io.WriteCloser, is it written to the path also returned from the function.
if path != expectedPath {
t.Fatalf("Received: %v; Expected: %v", path, expectedPath)
}
if _, err := fh.Write(expectedContent); err != nil {
t.Fatal(err)
}
if err := fh.Close(); err != nil {
t.Fatal(err)
}
content, err := os.ReadFile(expectedPath)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(content, expectedContent) {
t.Fatalf("Received: %v; Expected: %v", content, expectedContent)
}
}
| 159 |
eks-anywhere | aws | Go | package filewriter
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
)
type writer struct {
dir string
tempDir string
}
func NewWriter(dir string) (FileWriter, error) {
newFolder := filepath.Join(dir, DefaultTmpFolder)
if _, err := os.Stat(newFolder); errors.Is(err, os.ErrNotExist) {
err := os.MkdirAll(newFolder, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("creating directory [%s]: %v", dir, err)
}
}
return &writer{dir: dir, tempDir: newFolder}, nil
}
func (w *writer) Write(fileName string, content []byte, opts ...FileOptionsFunc) (string, error) {
o := buildOptions(w, opts)
filePath := filepath.Join(o.BasePath, fileName)
err := os.WriteFile(filePath, content, o.Permissions)
if err != nil {
return "", fmt.Errorf("writing to file [%s]: %v", filePath, err)
}
return filePath, nil
}
func (w *writer) WithDir(dir string) (FileWriter, error) {
return NewWriter(filepath.Join(w.dir, dir))
}
func (w *writer) Dir() string {
return w.dir
}
func (w *writer) TempDir() string {
return w.tempDir
}
func (w *writer) CleanUp() {
_, err := os.Stat(w.dir)
if err == nil {
os.RemoveAll(w.dir)
}
}
func (w *writer) CleanUpTemp() {
_, err := os.Stat(w.tempDir)
if err == nil {
os.RemoveAll(w.tempDir)
}
}
// Create creates a file with the given name rooted at w's base directory.
func (w *writer) Create(name string, opts ...FileOptionsFunc) (_ io.WriteCloser, path string, _ error) {
o := buildOptions(w, opts)
path = filepath.Join(o.BasePath, name)
fh, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, o.Permissions)
return fh, path, err
}
type options struct {
BasePath string
Permissions fs.FileMode
}
// buildOptions converts a set of FileOptionsFunc's to a single options struct.
func buildOptions(w *writer, opts []FileOptionsFunc) options {
op := defaultFileOptions()
for _, fn := range opts {
fn(op)
}
var basePath string
if op.IsTemp {
basePath = w.tempDir
} else {
basePath = w.dir
}
return options{
BasePath: basePath,
Permissions: op.Permissions,
}
}
| 99 |
eks-anywhere | aws | Go | package filewriter_test
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/filewriter"
)
func TestWriterWriteValid(t *testing.T) {
folder := "tmp_folder"
folder2 := "tmp_folder_2"
err := os.MkdirAll(folder2, os.ModePerm)
if err != nil {
t.Fatalf("error setting up test: %v", err)
}
defer os.RemoveAll(folder)
defer os.RemoveAll(folder2)
tests := []struct {
testName string
dir string
fileName string
content []byte
}{
{
testName: "test 1",
dir: folder,
fileName: "TestWriterWriteValid-success.yaml",
content: []byte(`
fake content
blablab
`),
},
{
testName: "test 2",
dir: folder2,
fileName: "TestWriterWriteValid-success.yaml",
content: []byte(`
fake content
blablab
`),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tr, err := filewriter.NewWriter(tt.dir)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
gotPath, err := tr.Write(tt.fileName, tt.content)
if err != nil {
t.Fatalf("writer.Write() error = %v", err)
}
wantPath := filepath.Join(tt.dir, filewriter.DefaultTmpFolder, tt.fileName)
if strings.Compare(gotPath, wantPath) != 0 {
t.Errorf("writer.Write() = %v, want %v", gotPath, wantPath)
}
test.AssertFilesEquals(t, gotPath, wantPath)
})
}
}
func TestEmptyFileName(t *testing.T) {
folder := "tmp_folder"
defer os.RemoveAll(folder)
tr, err := filewriter.NewWriter(folder)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
_, err = tr.Write("", []byte("content"))
if err == nil {
t.Fatalf("writer.Write() error is nil")
}
}
func TestWriterWithDir(t *testing.T) {
rootFolder := "folder_root"
subFolder := "subFolder"
defer os.RemoveAll(rootFolder)
tr, err := filewriter.NewWriter(rootFolder)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
tr, err = tr.WithDir(subFolder)
if err != nil {
t.Fatalf("failed creating writer with subdir error = %v", err)
}
gotPath, err := tr.Write("file.txt", []byte("file content"))
if err != nil {
t.Fatalf("writer.Write() error = %v", err)
}
wantPathPrefix := filepath.Join(rootFolder, subFolder)
if !strings.HasPrefix(gotPath, wantPathPrefix) {
t.Errorf("writer.Write() = %v, want to start with %v", gotPath, wantPathPrefix)
}
}
func TestWriterWritePersistent(t *testing.T) {
folder := "tmp_folder_opt"
folder2 := "tmp_folder_2_opt"
err := os.MkdirAll(folder2, os.ModePerm)
if err != nil {
t.Fatalf("error setting up test: %v", err)
}
defer os.RemoveAll(folder)
defer os.RemoveAll(folder2)
tests := []struct {
testName string
dir string
fileName string
content []byte
options []filewriter.FileOptionsFunc
}{
{
testName: "Write persistent file",
dir: folder,
fileName: "TestWriterWriteValid-success.yaml",
content: []byte(`
fake content
blablab
`),
options: []filewriter.FileOptionsFunc{filewriter.PersistentFile},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tr, err := filewriter.NewWriter(tt.dir)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
gotPath, err := tr.Write(tt.fileName, tt.content, tt.options...)
if err != nil {
t.Fatalf("writer.Write() error = %v", err)
}
wantPath := filepath.Join(tt.dir, tt.fileName)
if strings.Compare(gotPath, wantPath) != 0 {
t.Errorf("writer.Write() = %v, want %v", gotPath, wantPath)
}
test.AssertFilesEquals(t, gotPath, wantPath)
})
}
}
func TestWriterDir(t *testing.T) {
rootFolder := "folder_root"
defer os.RemoveAll(rootFolder)
tr, err := filewriter.NewWriter(rootFolder)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
if strings.Compare(tr.Dir(), rootFolder) != 0 {
t.Errorf("writer.Dir() = %v, want %v", tr.Dir(), rootFolder)
}
}
func TestWriterTempDir(t *testing.T) {
rootFolder := "folder_root"
tempFolder := filepath.Join(rootFolder, "generated")
defer os.RemoveAll(rootFolder)
tr, err := filewriter.NewWriter(rootFolder)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
if strings.Compare(tr.TempDir(), tempFolder) != 0 {
t.Errorf("writer.TempDir() = %v, want %v", tr.TempDir(), tempFolder)
}
}
func TestWriterCleanUpTempDir(t *testing.T) {
rootFolder := "folder_root"
defer os.RemoveAll(rootFolder)
tr, err := filewriter.NewWriter(rootFolder)
if err != nil {
t.Fatalf("failed creating writer error = %v", err)
}
tr.CleanUpTemp()
if _, err := os.Stat(tr.TempDir()); err == nil {
t.Errorf("writer.CleanUp(), want err, got nil")
}
}
| 204 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/filewriter (interfaces: FileWriter)
// Package mocks is a generated GoMock package.
package mocks
import (
io "io"
reflect "reflect"
filewriter "github.com/aws/eks-anywhere/pkg/filewriter"
gomock "github.com/golang/mock/gomock"
)
// MockFileWriter is a mock of FileWriter interface.
type MockFileWriter struct {
ctrl *gomock.Controller
recorder *MockFileWriterMockRecorder
}
// MockFileWriterMockRecorder is the mock recorder for MockFileWriter.
type MockFileWriterMockRecorder struct {
mock *MockFileWriter
}
// NewMockFileWriter creates a new mock instance.
func NewMockFileWriter(ctrl *gomock.Controller) *MockFileWriter {
mock := &MockFileWriter{ctrl: ctrl}
mock.recorder = &MockFileWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFileWriter) EXPECT() *MockFileWriterMockRecorder {
return m.recorder
}
// CleanUp mocks base method.
func (m *MockFileWriter) CleanUp() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "CleanUp")
}
// CleanUp indicates an expected call of CleanUp.
func (mr *MockFileWriterMockRecorder) CleanUp() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanUp", reflect.TypeOf((*MockFileWriter)(nil).CleanUp))
}
// CleanUpTemp mocks base method.
func (m *MockFileWriter) CleanUpTemp() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "CleanUpTemp")
}
// CleanUpTemp indicates an expected call of CleanUpTemp.
func (mr *MockFileWriterMockRecorder) CleanUpTemp() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanUpTemp", reflect.TypeOf((*MockFileWriter)(nil).CleanUpTemp))
}
// Create mocks base method.
func (m *MockFileWriter) Create(arg0 string, arg1 ...filewriter.FileOptionsFunc) (io.WriteCloser, string, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0}
for _, a := range arg1 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Create", varargs...)
ret0, _ := ret[0].(io.WriteCloser)
ret1, _ := ret[1].(string)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// Create indicates an expected call of Create.
func (mr *MockFileWriterMockRecorder) Create(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0}, arg1...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockFileWriter)(nil).Create), varargs...)
}
// Dir mocks base method.
func (m *MockFileWriter) Dir() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Dir")
ret0, _ := ret[0].(string)
return ret0
}
// Dir indicates an expected call of Dir.
func (mr *MockFileWriterMockRecorder) Dir() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dir", reflect.TypeOf((*MockFileWriter)(nil).Dir))
}
// TempDir mocks base method.
func (m *MockFileWriter) TempDir() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TempDir")
ret0, _ := ret[0].(string)
return ret0
}
// TempDir indicates an expected call of TempDir.
func (mr *MockFileWriterMockRecorder) TempDir() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TempDir", reflect.TypeOf((*MockFileWriter)(nil).TempDir))
}
// WithDir mocks base method.
func (m *MockFileWriter) WithDir(arg0 string) (filewriter.FileWriter, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithDir", arg0)
ret0, _ := ret[0].(filewriter.FileWriter)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WithDir indicates an expected call of WithDir.
func (mr *MockFileWriterMockRecorder) WithDir(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithDir", reflect.TypeOf((*MockFileWriter)(nil).WithDir), arg0)
}
// Write mocks base method.
func (m *MockFileWriter) Write(arg0 string, arg1 []byte, arg2 ...filewriter.FileOptionsFunc) (string, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Write", varargs...)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Write indicates an expected call of Write.
func (mr *MockFileWriterMockRecorder) Write(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockFileWriter)(nil).Write), varargs...)
}
| 145 |
eks-anywhere | aws | Go | package git
import (
"context"
"fmt"
)
type Client interface {
Add(filename string) error
Remove(filename string) error
Clone(ctx context.Context) error
Commit(message string) error
Push(ctx context.Context) error
Pull(ctx context.Context, branch string) error
Init() error
Branch(name string) error
ValidateRemoteExists(ctx context.Context) error
}
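// ProviderClient defines the operations performed against a git provider, such as GitHub.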
type ProviderClient interface {
GetRepo(ctx context.Context) (repo *Repository, err error)
CreateRepo(ctx context.Context, opts CreateRepoOpts) (repo *Repository, err error)
DeleteRepo(ctx context.Context, opts DeleteRepoOpts) error
AddDeployKeyToRepo(ctx context.Context, opts AddDeployKeyOpts) error
Validate(ctx context.Context) error
PathExists(ctx context.Context, owner, repo, branch, path string) (bool, error)
}
type CreateRepoOpts struct {
Name string
Owner string
Description string
Personal bool
Privacy bool
AutoInit bool
}
type GetRepoOpts struct {
Owner string
Repository string
}
type DeleteRepoOpts struct {
Owner string
Repository string
}
type AddDeployKeyOpts struct {
Owner string
Repository string
Key string
Title string
ReadOnly bool
}
type Repository struct {
Name string
Owner string
Organization string
CloneUrl string
}
type TokenAuth struct {
Username string
Token string
}
type RepositoryDoesNotExistError struct {
repository string
owner string
Err error
}
func (e *RepositoryDoesNotExistError) Error() string {
return fmt.Sprintf("repository %s with owner %s not found: %s", e.repository, e.owner, e.Err)
}
type RepositoryIsEmptyError struct {
Repository string
}
func (e *RepositoryIsEmptyError) Error() string {
return fmt.Sprintf("repository %s is empty can cannot be cloned", e.Repository)
}
type RepositoryUpToDateError struct{}
func (e *RepositoryUpToDateError) Error() string {
return "error pulling from repository: already up-to-date"
}
type RemoteBranchDoesNotExistError struct {
Repository string
Branch string
}
func (e *RemoteBranchDoesNotExistError) Error() string {
return fmt.Sprintf("error pulling from repository %s: remote branch %s does not exist", e.Repository, e.Branch)
}
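// handlePullError is an illustrative sketch, not part of the original package:
// it shows how a caller might branch on the sentinel error types above. A plain
// type switch suffices because clients return these types directly rather than
// wrapping them.
func handlePullError(err error) bool {
switch err.(type) {
case nil, *RepositoryUpToDateError:
// An already up-to-date repository is a success from the caller's point of view.
return true
default:
// RemoteBranchDoesNotExistError and all other errors require intervention.
return false
}
}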
| 100 |
eks-anywhere | aws | Go | package gitfactory
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
gogitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
"golang.org/x/crypto/ssh"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/git/gitclient"
"github.com/aws/eks-anywhere/pkg/git/gogithub"
"github.com/aws/eks-anywhere/pkg/git/providers/github"
)
type GitTools struct {
Provider git.ProviderClient
Client git.Client
Writer filewriter.FileWriter
RepositoryDirectory string
}
type GitToolsOpt func(opts *GitTools)
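// Build constructs the GitTools for a cluster's FluxConfig, selecting the github
// or generic git provider based on which one is set in the spec and wiring up the
// client, repository directory and file writer.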
func Build(ctx context.Context, cluster *v1alpha1.Cluster, fluxConfig *v1alpha1.FluxConfig, writer filewriter.FileWriter, opts ...GitToolsOpt) (*GitTools, error) {
var repo string
var repoUrl string
var gitAuth transport.AuthMethod
var err error
var tools GitTools
switch {
case fluxConfig.Spec.Github != nil:
githubToken, err := github.GetGithubAccessTokenFromEnv()
if err != nil {
return nil, err
}
tools.Provider, err = buildGithubProvider(ctx, githubToken, fluxConfig.Spec.Github)
if err != nil {
return nil, fmt.Errorf("building github provider: %v", err)
}
gitAuth = &http.BasicAuth{Password: githubToken, Username: fluxConfig.Spec.Github.Owner}
repo = fluxConfig.Spec.Github.Repository
repoUrl = github.RepoUrl(fluxConfig.Spec.Github.Owner, repo)
case fluxConfig.Spec.Git != nil:
privateKeyFile := os.Getenv(config.EksaGitPrivateKeyTokenEnv)
privateKeyPassphrase := os.Getenv(config.EksaGitPassphraseTokenEnv)
gitKnownHosts := os.Getenv(config.EksaGitKnownHostsFileEnv)
if err = os.Setenv(config.SshKnownHostsEnv, gitKnownHosts); err != nil {
return nil, fmt.Errorf("unable to set %s: %v", config.SshKnownHostsEnv, err)
}
gitAuth, err = getSshAuthFromPrivateKey(privateKeyFile, privateKeyPassphrase)
if err != nil {
return nil, err
}
repoUrl = fluxConfig.Spec.Git.RepositoryUrl
repo = path.Base(strings.TrimSuffix(repoUrl, filepath.Ext(repoUrl)))
default:
return nil, fmt.Errorf("no valid git provider in FluxConfigSpec. Spec: %v", fluxConfig)
}
tools.RepositoryDirectory = filepath.Join(cluster.Name, "git", repo)
for _, opt := range opts {
if opt != nil {
opt(&tools)
}
}
tools.Client = buildGitClient(ctx, gitAuth, repoUrl, tools.RepositoryDirectory)
tools.Writer, err = newRepositoryWriter(writer, repo)
if err != nil {
return nil, err
}
return &tools, nil
}
func buildGitClient(ctx context.Context, auth transport.AuthMethod, repoUrl string, repo string) *gitclient.GitClient {
opts := []gitclient.Opt{
gitclient.WithRepositoryUrl(repoUrl),
gitclient.WithRepositoryDirectory(repo),
gitclient.WithAuth(auth),
}
return gitclient.New(opts...)
}
func buildGithubProvider(ctx context.Context, githubToken string, config *v1alpha1.GithubProviderConfig) (git.ProviderClient, error) {
auth := git.TokenAuth{Token: githubToken, Username: config.Owner}
gogithubOpts := gogithub.Options{Auth: auth}
githubProviderClient := gogithub.New(ctx, gogithubOpts)
provider, err := github.New(githubProviderClient, config, auth)
if err != nil {
return nil, err
}
return provider, nil
}
func newRepositoryWriter(writer filewriter.FileWriter, repository string) (filewriter.FileWriter, error) {
localGitWriterPath := filepath.Join("git", repository)
gitwriter, err := writer.WithDir(localGitWriterPath)
if err != nil {
return nil, fmt.Errorf("creating file writer: %v", err)
}
gitwriter.CleanUpTemp()
return gitwriter, nil
}
func WithRepositoryDirectory(repoDir string) GitToolsOpt {
return func(opts *GitTools) {
opts.RepositoryDirectory = repoDir
}
}
func getSshAuthFromPrivateKey(privateKeyFile string, passphrase string) (gogitssh.AuthMethod, error) {
signer, err := getSignerFromPrivateKeyFile(privateKeyFile, passphrase)
if err != nil {
return nil, err
}
return &gogitssh.PublicKeys{
Signer: signer,
User: "git",
}, nil
}
func getSignerFromPrivateKeyFile(privateKeyFile string, passphrase string) (ssh.Signer, error) {
var signer ssh.Signer
var err error
sshKey, err := os.ReadFile(privateKeyFile)
if err != nil {
return nil, err
}
if passphrase == "" {
signer, err = ssh.ParsePrivateKey(sshKey)
if err != nil {
if _, ok := err.(*ssh.PassphraseMissingError); ok {
return nil, fmt.Errorf("%s, please set the EKSA_GIT_SSH_KEY_PASSPHRASE environment variable", err)
}
return nil, err
}
return signer, nil
}
return ssh.ParsePrivateKeyWithPassphrase(sshKey, []byte(passphrase))
}
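// exampleBuild is a hypothetical usage sketch, not part of the original package:
// it shows the expected wiring of Build for a cluster whose FluxConfig uses the
// github provider. The cluster, fluxConfig and writer arguments are assumed to
// come from an already-parsed cluster spec; the directory below is a placeholder.
func exampleBuild(ctx context.Context, cluster *v1alpha1.Cluster, fluxConfig *v1alpha1.FluxConfig, writer filewriter.FileWriter) error {
tools, err := Build(ctx, cluster, fluxConfig, writer, WithRepositoryDirectory("clusters/example/git"))
if err != nil {
return err
}
// The returned client is configured to clone into tools.RepositoryDirectory.
return tools.Client.Clone(ctx)
}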
| 159 |
eks-anywhere | aws | Go | package gitfactory_test
import (
"context"
"testing"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
gitFactory "github.com/aws/eks-anywhere/pkg/git/factory"
"github.com/aws/eks-anywhere/pkg/git/providers/github"
)
const (
validPATValue = "ghp_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)
func TestGitFactoryHappyPath(t *testing.T) {
tests := []struct {
testName string
authTokenEnv string
opt gitFactory.GitToolsOpt
}{
{
testName: "valid token var",
authTokenEnv: validPATValue,
},
{
testName: "valid token var with opt",
authTokenEnv: validPATValue,
opt: gitFactory.WithRepositoryDirectory("test"),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
setupContext(t)
gitProviderConfig := v1alpha1.GithubProviderConfig{
Owner: "Jeff",
Repository: "testRepo",
Personal: true,
}
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "testCluster",
},
}
fluxConfig := &v1alpha1.FluxConfig{
Spec: v1alpha1.FluxConfigSpec{
Github: &gitProviderConfig,
},
}
_, w := test.NewWriter(t)
_, err := gitFactory.Build(context.Background(), cluster, fluxConfig, w, tt.opt)
if err != nil {
t.Errorf("gitfactory.BuldProvider returned err, wanted nil. err: %v", err)
}
})
}
}
func setupContext(t *testing.T) {
t.Setenv(github.EksaGithubTokenEnv, validPATValue)
t.Setenv(github.GithubTokenEnv, validPATValue)
}
| 72 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/git/factory/gitfactory.go
// Package mocks is a generated GoMock package.
package mocks
| 6 |
eks-anywhere | aws | Go | package gitclient
import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
gogit "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
)
const (
gitTimeout = 30 * time.Second
maxRetries = 5
backOffPeriod = 5 * time.Second
emptyRepoError = "remote repository is empty"
)
type GitClient struct {
Auth transport.AuthMethod
Client GoGit
RepoUrl string
RepoDirectory string
Retrier *retrier.Retrier
}
type Opt func(*GitClient)
func New(opts ...Opt) *GitClient {
c := &GitClient{
Client: &goGit{},
Retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
}
for _, opt := range opts {
opt(c)
}
return c
}
func WithAuth(auth transport.AuthMethod) Opt {
return func(c *GitClient) {
c.Auth = auth
}
}
func WithRepositoryUrl(repoUrl string) Opt {
return func(c *GitClient) {
c.RepoUrl = repoUrl
}
}
func WithRepositoryDirectory(repoDir string) Opt {
return func(c *GitClient) {
c.RepoDirectory = repoDir
}
}
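// Clone clones the remote repository into RepoDirectory, converting go-git's
// empty-repository failure into a RepositoryIsEmptyError.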
func (g *GitClient) Clone(ctx context.Context) error {
_, err := g.Client.Clone(ctx, g.RepoDirectory, g.RepoUrl, g.Auth)
if err != nil && strings.Contains(err.Error(), emptyRepoError) {
return &git.RepositoryIsEmptyError{
Repository: g.RepoDirectory,
}
}
return err
}
func (g *GitClient) Add(filename string) error {
logger.V(3).Info("Opening directory", "directory", g.RepoDirectory)
r, err := g.Client.OpenDir(g.RepoDirectory)
if err != nil {
return err
}
logger.V(3).Info("Opening working tree")
w, err := g.Client.OpenWorktree(r)
if err != nil {
return err
}
logger.V(3).Info("Tracking specified files", "file", filename)
err = g.Client.AddGlob(filename, w)
return err
}
func (g *GitClient) Remove(filename string) error {
logger.V(3).Info("Opening directory", "directory", g.RepoDirectory)
r, err := g.Client.OpenDir(g.RepoDirectory)
if err != nil {
return err
}
logger.V(3).Info("Opening working tree")
w, err := g.Client.OpenWorktree(r)
if err != nil {
return err
}
logger.V(3).Info("Removing specified files", "file", filename)
_, err = g.Client.Remove(filename, w)
return err
}
func (g *GitClient) Commit(message string) error {
logger.V(3).Info("Opening directory", "directory", g.RepoDirectory)
r, err := g.Client.OpenDir(g.RepoDirectory)
if err != nil {
logger.Info("Failed while attempting to open repo")
return err
}
logger.V(3).Info("Opening working tree")
w, err := g.Client.OpenWorktree(r)
if err != nil {
return err
}
logger.V(3).Info("Generating Commit object...")
commitSignature := &object.Signature{
Name: "EKS-A",
When: time.Now(),
}
commit, err := g.Client.Commit(message, commitSignature, w)
if err != nil {
return err
}
logger.V(3).Info("Committing Object to local repo", "repo", g.RepoDirectory)
finalizedCommit, err := g.Client.CommitObject(r, commit)
logger.Info("Finalized commit and committed to local repository", "hash", finalizedCommit.Hash)
return err
}
func (g *GitClient) Push(ctx context.Context) error {
logger.V(3).Info("Pushing to remote", "repo", g.RepoDirectory)
r, err := g.Client.OpenDir(g.RepoDirectory)
if err != nil {
return fmt.Errorf("err pushing: %v", err)
}
err = g.Client.PushWithContext(ctx, r, g.Auth)
if err != nil {
return fmt.Errorf("pushing: %v", err)
}
return err
}
func (g *GitClient) Pull(ctx context.Context, branch string) error {
logger.V(3).Info("Pulling from remote", "repo", g.RepoDirectory, "remote", gogit.DefaultRemoteName)
r, err := g.Client.OpenDir(g.RepoDirectory)
if err != nil {
return fmt.Errorf("pulling from remote: %v", err)
}
w, err := g.Client.OpenWorktree(r)
if err != nil {
return fmt.Errorf("pulling from remote: %v", err)
}
branchRef := plumbing.NewBranchReferenceName(branch)
err = g.Client.PullWithContext(ctx, w, g.Auth, branchRef)
if errors.Is(err, gogit.NoErrAlreadyUpToDate) {
logger.V(3).Info("Local repo already up-to-date", "repo", g.RepoDirectory, "remote", gogit.DefaultRemoteName)
return &git.RepositoryUpToDateError{}
}
if err != nil {
return fmt.Errorf("pulling from remote: %v", err)
}
ref, err := g.Client.Head(r)
if err != nil {
return fmt.Errorf("pulling from remote: %v", err)
}
commit, err := g.Client.CommitObject(r, ref.Hash())
if err != nil {
return fmt.Errorf("accessing latest commit after pulling from remote: %v", err)
}
logger.V(3).Info("Successfully pulled from remote", "repo", g.RepoDirectory, "remote", gogit.DefaultRemoteName, "latest commit", commit.Hash)
return nil
}
func (g *GitClient) Init() error {
r, err := g.Client.Init(g.RepoDirectory)
if err != nil {
return err
}
if _, err = g.Client.Create(r, g.RepoUrl); err != nil {
return fmt.Errorf("initializing repository: %v", err)
}
return nil
}
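// Branch creates the named local branch if it does not already exist, checks it
// out, and pulls from the matching remote branch when one exists upstream.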
func (g *GitClient) Branch(name string) error {
r, err := g.Client.OpenDir(g.RepoDirectory)
if err != nil {
return fmt.Errorf("creating branch %s: %v", name, err)
}
localBranchRef := plumbing.NewBranchReferenceName(name)
branchOpts := &config.Branch{
Name: name,
Remote: gogit.DefaultRemoteName,
Merge: localBranchRef,
Rebase: "true",
}
err = g.Client.CreateBranch(r, branchOpts)
branchExistsLocally := errors.Is(err, gogit.ErrBranchExists)
if err != nil && !branchExistsLocally {
return fmt.Errorf("creating branch %s: %v", name, err)
}
if branchExistsLocally {
logger.V(3).Info("Branch already exists locally", "branch", name)
}
if !branchExistsLocally {
logger.V(3).Info("Branch does not exist locally", "branch", name)
headref, err := g.Client.Head(r)
if err != nil {
return fmt.Errorf("creating branch %s: %v", name, err)
}
h := headref.Hash()
err = g.Client.SetRepositoryReference(r, plumbing.NewHashReference(localBranchRef, h))
if err != nil {
return fmt.Errorf("creating branch %s: %v", name, err)
}
}
w, err := g.Client.OpenWorktree(r)
if err != nil {
return fmt.Errorf("creating branch %s: %v", name, err)
}
err = g.Client.Checkout(w, &gogit.CheckoutOptions{
Branch: plumbing.ReferenceName(localBranchRef.String()),
Force: true,
})
if err != nil {
return fmt.Errorf("creating branch %s: %v", name, err)
}
err = g.pullIfRemoteExists(r, w, name, localBranchRef)
if err != nil {
return fmt.Errorf("creating branch %s: %v", name, err)
}
return nil
}
func (g *GitClient) ValidateRemoteExists(ctx context.Context) error {
logger.V(3).Info("Validating git setup", "repoUrl", g.RepoUrl)
remote := g.Client.NewRemote(g.RepoUrl, gogit.DefaultRemoteName)
// Check if we are able to make a connection to the remote by attempting to list refs
_, err := g.Client.ListWithContext(ctx, remote, g.Auth)
if err != nil {
return fmt.Errorf("connecting with remote %v for repository: %v", gogit.DefaultRemoteName, err)
}
return nil
}
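// pullIfRemoteExists pulls the branch only when it already exists on the remote,
// retrying transient failures with the client's Retrier.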
func (g *GitClient) pullIfRemoteExists(r *gogit.Repository, w *gogit.Worktree, branchName string, localBranchRef plumbing.ReferenceName) error {
err := g.Retrier.Retry(func() error {
remoteExists, err := g.remoteBranchExists(r, localBranchRef)
if err != nil {
return fmt.Errorf("checking if remote branch exists %s: %v", branchName, err)
}
if remoteExists {
err = g.Client.PullWithContext(context.Background(), w, g.Auth, localBranchRef)
if err != nil && !errors.Is(err, gogit.NoErrAlreadyUpToDate) && !errors.Is(err, gogit.ErrRemoteNotFound) {
return fmt.Errorf("pulling from remote when checking out existing branch %s: %v", branchName, err)
}
}
return nil
})
return err
}
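// remoteBranchExists lists references on the default remote and reports whether
// the given branch reference is present; an empty remote counts as the branch
// not existing.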
func (g *GitClient) remoteBranchExists(r *gogit.Repository, localBranchRef plumbing.ReferenceName) (bool, error) {
reflist, err := g.Client.ListRemotes(r, g.Auth)
if err != nil {
if strings.Contains(err.Error(), emptyRepoError) {
return false, nil
}
return false, fmt.Errorf("listing remotes: %v", err)
}
lb := localBranchRef.String()
for _, ref := range reflist {
if ref.Name().String() == lb {
return true, nil
}
}
return false, nil
}
type GoGit interface {
AddGlob(f string, w *gogit.Worktree) error
Checkout(w *gogit.Worktree, opts *gogit.CheckoutOptions) error
Clone(ctx context.Context, dir string, repoUrl string, auth transport.AuthMethod) (*gogit.Repository, error)
Commit(m string, sig *object.Signature, w *gogit.Worktree) (plumbing.Hash, error)
CommitObject(r *gogit.Repository, h plumbing.Hash) (*object.Commit, error)
Create(r *gogit.Repository, url string) (*gogit.Remote, error)
CreateBranch(r *gogit.Repository, config *config.Branch) error
Head(r *gogit.Repository) (*plumbing.Reference, error)
NewRemote(url, remoteName string) *gogit.Remote
Init(dir string) (*gogit.Repository, error)
OpenDir(dir string) (*gogit.Repository, error)
OpenWorktree(r *gogit.Repository) (*gogit.Worktree, error)
PushWithContext(ctx context.Context, r *gogit.Repository, auth transport.AuthMethod) error
PullWithContext(ctx context.Context, w *gogit.Worktree, auth transport.AuthMethod, ref plumbing.ReferenceName) error
ListRemotes(r *gogit.Repository, auth transport.AuthMethod) ([]*plumbing.Reference, error)
ListWithContext(ctx context.Context, r *gogit.Remote, auth transport.AuthMethod) ([]*plumbing.Reference, error)
Remove(f string, w *gogit.Worktree) (plumbing.Hash, error)
SetRepositoryReference(r *gogit.Repository, p *plumbing.Reference) error
}
type goGit struct{}
func (gg *goGit) Clone(ctx context.Context, dir string, repourl string, auth transport.AuthMethod) (*gogit.Repository, error) {
ctx, cancel := context.WithTimeout(ctx, gitTimeout)
defer cancel()
return gogit.PlainCloneContext(ctx, dir, false, &gogit.CloneOptions{
Auth: auth,
URL: repourl,
Progress: os.Stdout,
})
}
func (gg *goGit) OpenDir(dir string) (*gogit.Repository, error) {
return gogit.PlainOpen(dir)
}
func (gg *goGit) OpenWorktree(r *gogit.Repository) (*gogit.Worktree, error) {
return r.Worktree()
}
func (gg *goGit) AddGlob(f string, w *gogit.Worktree) error {
return w.AddGlob(f)
}
func (gg *goGit) Commit(m string, sig *object.Signature, w *gogit.Worktree) (plumbing.Hash, error) {
return w.Commit(m, &gogit.CommitOptions{
Author: sig,
})
}
func (gg *goGit) CommitObject(r *gogit.Repository, h plumbing.Hash) (*object.Commit, error) {
return r.CommitObject(h)
}
func (gg *goGit) PushWithContext(ctx context.Context, r *gogit.Repository, auth transport.AuthMethod) error {
ctx, cancel := context.WithTimeout(ctx, gitTimeout)
defer cancel()
return r.PushContext(ctx, &gogit.PushOptions{
Auth: auth,
})
}
func (gg *goGit) PullWithContext(ctx context.Context, w *gogit.Worktree, auth transport.AuthMethod, ref plumbing.ReferenceName) error {
ctx, cancel := context.WithTimeout(ctx, gitTimeout)
defer cancel()
return w.PullContext(ctx, &gogit.PullOptions{RemoteName: gogit.DefaultRemoteName, Auth: auth, ReferenceName: ref})
}
func (gg *goGit) Head(r *gogit.Repository) (*plumbing.Reference, error) {
return r.Head()
}
func (gg *goGit) Init(dir string) (*gogit.Repository, error) {
return gogit.PlainInit(dir, false)
}
func (gg *goGit) NewRemote(url, remoteName string) *gogit.Remote {
return gogit.NewRemote(memory.NewStorage(), &config.RemoteConfig{
Name: remoteName,
URLs: []string{url},
})
}
func (gg *goGit) Checkout(worktree *gogit.Worktree, opts *gogit.CheckoutOptions) error {
return worktree.Checkout(opts)
}
func (gg *goGit) Create(r *gogit.Repository, url string) (*gogit.Remote, error) {
return r.CreateRemote(&config.RemoteConfig{
Name: gogit.DefaultRemoteName,
URLs: []string{url},
})
}
func (gg *goGit) CreateBranch(repo *gogit.Repository, config *config.Branch) error {
return repo.CreateBranch(config)
}
func (gg *goGit) ListRemotes(r *gogit.Repository, auth transport.AuthMethod) ([]*plumbing.Reference, error) {
remote, err := r.Remote(gogit.DefaultRemoteName)
if err != nil {
if errors.Is(err, gogit.ErrRemoteNotFound) {
return []*plumbing.Reference{}, nil
}
return nil, err
}
refList, err := remote.List(&gogit.ListOptions{Auth: auth})
if err != nil {
return nil, err
}
return refList, nil
}
func (gg *goGit) Remove(f string, w *gogit.Worktree) (plumbing.Hash, error) {
return w.Remove(f)
}
func (gg *goGit) ListWithContext(ctx context.Context, r *gogit.Remote, auth transport.AuthMethod) ([]*plumbing.Reference, error) {
refList, err := r.ListContext(ctx, &gogit.ListOptions{Auth: auth})
if err != nil {
return nil, err
}
return refList, nil
}
func (gg *goGit) SetRepositoryReference(r *gogit.Repository, p *plumbing.Reference) error {
return r.Storer.SetReference(p)
}
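// exampleNew is an illustrative sketch, not part of the original package: it
// shows the intended construction of a GitClient through the functional options
// above. The URL and directory values are placeholders.
func exampleNew(auth transport.AuthMethod) *GitClient {
return New(
WithRepositoryUrl("https://github.com/example/repo.git"), // placeholder URL
WithRepositoryDirectory("example/git/repo"), // placeholder local path
WithAuth(auth),
)
}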
| 447 |
eks-anywhere | aws | Go | package gitclient_test
import (
"context"
"fmt"
"reflect"
"testing"
goGit "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/git/gitclient"
mockGitClient "github.com/aws/eks-anywhere/pkg/git/gitclient/mocks"
)
const (
repoDir = "testrepo"
)
func TestGoGitClone(t *testing.T) {
tests := []struct {
name string
wantErr bool
throwError error
matchError error
}{
{
name: "clone repo success",
wantErr: false,
},
{
name: "empty repository error",
wantErr: true,
throwError: fmt.Errorf("remote repository is empty"),
matchError: &git.RepositoryIsEmptyError{
Repository: "testrepo",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, client := newGoGitMock(t)
repoUrl := "testurl"
auth := &http.BasicAuth{}
g := &gitclient.GitClient{
RepoDirectory: repoDir,
RepoUrl: repoUrl,
Auth: auth,
Client: client,
}
client.EXPECT().Clone(ctx, repoDir, repoUrl, auth).Return(&goGit.Repository{}, tt.throwError)
err := g.Clone(ctx)
if (err != nil) != tt.wantErr {
t.Errorf("Clone() error = %v, wantErr = %v", err, tt.wantErr)
return
}
if tt.wantErr {
if !reflect.DeepEqual(err, tt.matchError) {
t.Errorf("Clone() error = %v, matchError %v", err, tt.matchError)
}
}
})
}
}
func TestGoGitAdd(t *testing.T) {
_, client := newGoGitMock(t)
filename := "testfile"
client.EXPECT().OpenDir(repoDir).Return(&goGit.Repository{}, nil)
client.EXPECT().OpenWorktree(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(&goGit.Worktree{}, nil)
client.EXPECT().AddGlob(gomock.Any(), gomock.Any()).Do(func(arg0 string, arg1 *goGit.Worktree) {}).Return(nil)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
err := g.Add(filename)
if err != nil {
t.Errorf("Add() error = %v", err)
return
}
}
func TestGoGitRemove(t *testing.T) {
_, client := newGoGitMock(t)
filename := "testfile"
client.EXPECT().OpenDir(repoDir).Return(&goGit.Repository{}, nil)
client.EXPECT().OpenWorktree(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(&goGit.Worktree{}, nil)
client.EXPECT().Remove(gomock.Any(), gomock.Any()).Do(func(arg0 string, arg1 *goGit.Worktree) {}).Return(plumbing.Hash{}, nil)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
err := g.Remove(filename)
if err != nil {
t.Errorf("Remove() error = %v", err)
return
}
}
func TestGoGitCommit(t *testing.T) {
_, client := newGoGitMock(t)
message := "message"
client.EXPECT().OpenDir(repoDir).Return(&goGit.Repository{}, nil)
client.EXPECT().OpenWorktree(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(&goGit.Worktree{}, nil)
client.EXPECT().Commit(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(arg0 string, arg1 *object.Signature, arg2 *goGit.Worktree) {}).Return(plumbing.Hash{}, nil)
client.EXPECT().CommitObject(gomock.Any(), gomock.Any()).Do(func(arg0 *goGit.Repository, arg1 plumbing.Hash) {}).Return(&object.Commit{}, nil)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
err := g.Commit(message)
if err != nil {
t.Errorf("Commit() error = %v", err)
return
}
}
func TestGoGitPush(t *testing.T) {
ctx, client := newGoGitMock(t)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
client.EXPECT().OpenDir(repoDir).Return(&goGit.Repository{}, nil)
client.EXPECT().PushWithContext(ctx, gomock.Any(), gomock.Any()).Do(func(arg0 context.Context, arg1 *goGit.Repository, arg2 transport.AuthMethod) {}).Return(nil)
err := g.Push(ctx)
if err != nil {
t.Errorf("Push() error = %v", err)
return
}
}
func TestGoGitPull(t *testing.T) {
tests := []struct {
name string
wantErr bool
throwError error
matchError error
}{
{
name: "pull success",
wantErr: false,
},
{
name: "repo already up-to-date",
wantErr: true,
throwError: fmt.Errorf("already up-to-date"),
matchError: fmt.Errorf("pulling from remote: %v", goGit.NoErrAlreadyUpToDate),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, client := newGoGitMock(t)
branch := "testbranch"
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
client.EXPECT().OpenDir(repoDir).Return(&goGit.Repository{}, nil)
client.EXPECT().OpenWorktree(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(&goGit.Worktree{}, nil)
client.EXPECT().PullWithContext(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Do(func(arg0 context.Context, arg1 *goGit.Worktree, arg2 transport.AuthMethod, name plumbing.ReferenceName) {
}).Return(tt.throwError)
if !tt.wantErr {
client.EXPECT().Head(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(&plumbing.Reference{}, nil)
client.EXPECT().CommitObject(gomock.Any(), gomock.Any()).Do(func(arg0 *goGit.Repository, arg1 plumbing.Hash) {}).Return(&object.Commit{}, nil)
}
err := g.Pull(ctx, branch)
if (err != nil) != tt.wantErr {
t.Errorf("Pull() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
if !reflect.DeepEqual(err, tt.matchError) {
t.Errorf("Pull() error = %v, matchError %v", err, tt.matchError)
}
}
})
}
}
func TestGoGitInit(t *testing.T) {
_, client := newGoGitMock(t)
url := "testurl"
client.EXPECT().Init(repoDir).Return(&goGit.Repository{}, nil)
client.EXPECT().Create(gomock.Any(), url).Do(func(arg0 *goGit.Repository, arg1 string) {}).Return(&goGit.Remote{}, nil)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
RepoUrl: url,
Client: client,
}
err := g.Init()
if err != nil {
t.Errorf("Init() error = %v", err)
return
}
}
func TestGoGitBranch(t *testing.T) {
_, client := newGoGitMock(t)
repo := &goGit.Repository{}
headRef := &plumbing.Reference{}
worktree := &goGit.Worktree{}
bOpts := &config.Branch{
Name: "testBranch",
Remote: "origin",
Merge: "refs/heads/testBranch",
Rebase: "true",
}
cOpts := &goGit.CheckoutOptions{
Branch: plumbing.NewBranchReferenceName("testBranch"),
Force: true,
}
client.EXPECT().OpenDir(repoDir).Return(repo, nil)
client.EXPECT().CreateBranch(repo, bOpts).Return(nil)
client.EXPECT().Head(repo).Return(headRef, nil)
client.EXPECT().OpenWorktree(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(worktree, nil)
client.EXPECT().SetRepositoryReference(repo, gomock.Any()).Return(nil)
client.EXPECT().Checkout(worktree, cOpts).Return(nil)
client.EXPECT().ListRemotes(repo, gomock.Any()).Return(nil, nil)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
err := g.Branch("testBranch")
if err != nil {
t.Errorf("Branch() error = %v", err)
return
}
}
func TestGoGitBranchRemoteExists(t *testing.T) {
_, client := newGoGitMock(t)
repo := &goGit.Repository{}
headRef := &plumbing.Reference{}
worktree := &goGit.Worktree{}
bOpts := &config.Branch{
Name: "testBranch",
Remote: "origin",
Merge: "refs/heads/testBranch",
Rebase: "true",
}
localBranchRef := plumbing.NewBranchReferenceName("testBranch")
cOpts := &goGit.CheckoutOptions{
Branch: localBranchRef,
Force: true,
}
returnReferences := []*plumbing.Reference{
plumbing.NewHashReference("refs/heads/testBranch", headRef.Hash()),
}
client.EXPECT().OpenDir(repoDir).Return(repo, nil)
client.EXPECT().CreateBranch(repo, bOpts).Return(nil)
client.EXPECT().Head(repo).Return(headRef, nil)
client.EXPECT().OpenWorktree(gomock.Any()).Do(func(arg0 *goGit.Repository) {}).Return(worktree, nil)
client.EXPECT().SetRepositoryReference(repo, gomock.Any()).Return(nil)
client.EXPECT().Checkout(worktree, cOpts).Return(nil)
client.EXPECT().ListRemotes(repo, gomock.Any()).Return(returnReferences, nil)
client.EXPECT().PullWithContext(gomock.Any(), worktree, gomock.Any(), localBranchRef)
g := &gitclient.GitClient{
RepoDirectory: repoDir,
Client: client,
}
err := g.Branch("testBranch")
if err != nil {
t.Errorf("Branch() error = %v", err)
return
}
}
func TestGoGitValidateRemoteExists(t *testing.T) {
tests := []struct {
name string
wantErr bool
throwError error
}{
{
name: "validate success",
wantErr: false,
},
{
name: "invalid repository error",
wantErr: true,
throwError: fmt.Errorf("remote repository does not exist"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, client := newGoGitMock(t)
g := &gitclient.GitClient{
RepoUrl: "testurl",
Client: client,
}
remote := &goGit.Remote{}
client.EXPECT().NewRemote(g.RepoUrl, goGit.DefaultRemoteName).Return(remote)
client.EXPECT().ListWithContext(ctx, remote, g.Auth).Return([]*plumbing.Reference{}, tt.throwError)
err := g.ValidateRemoteExists(ctx)
if (err != nil) != tt.wantErr {
t.Errorf("Clone() error = %v, wantErr = %v", err, tt.wantErr)
}
})
}
}
func newGoGitMock(t *testing.T) (context.Context, *mockGitClient.MockGoGit) {
ctx := context.Background()
ctrl := gomock.NewController(t)
client := mockGitClient.NewMockGoGit(ctrl)
return ctx, client
}
| 349 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/git/gitclient (interfaces: GoGit)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
git "github.com/go-git/go-git/v5"
config "github.com/go-git/go-git/v5/config"
plumbing "github.com/go-git/go-git/v5/plumbing"
object "github.com/go-git/go-git/v5/plumbing/object"
transport "github.com/go-git/go-git/v5/plumbing/transport"
gomock "github.com/golang/mock/gomock"
)
// MockGoGit is a mock of GoGit interface.
type MockGoGit struct {
ctrl *gomock.Controller
recorder *MockGoGitMockRecorder
}
// MockGoGitMockRecorder is the mock recorder for MockGoGit.
type MockGoGitMockRecorder struct {
mock *MockGoGit
}
// NewMockGoGit creates a new mock instance.
func NewMockGoGit(ctrl *gomock.Controller) *MockGoGit {
mock := &MockGoGit{ctrl: ctrl}
mock.recorder = &MockGoGitMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGoGit) EXPECT() *MockGoGitMockRecorder {
return m.recorder
}
// AddGlob mocks base method.
func (m *MockGoGit) AddGlob(arg0 string, arg1 *git.Worktree) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddGlob", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// AddGlob indicates an expected call of AddGlob.
func (mr *MockGoGitMockRecorder) AddGlob(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddGlob", reflect.TypeOf((*MockGoGit)(nil).AddGlob), arg0, arg1)
}
// Checkout mocks base method.
func (m *MockGoGit) Checkout(arg0 *git.Worktree, arg1 *git.CheckoutOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Checkout", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Checkout indicates an expected call of Checkout.
func (mr *MockGoGitMockRecorder) Checkout(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Checkout", reflect.TypeOf((*MockGoGit)(nil).Checkout), arg0, arg1)
}
// Clone mocks base method.
func (m *MockGoGit) Clone(arg0 context.Context, arg1, arg2 string, arg3 transport.AuthMethod) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Clone", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Clone indicates an expected call of Clone.
func (mr *MockGoGitMockRecorder) Clone(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clone", reflect.TypeOf((*MockGoGit)(nil).Clone), arg0, arg1, arg2, arg3)
}
// Commit mocks base method.
func (m *MockGoGit) Commit(arg0 string, arg1 *object.Signature, arg2 *git.Worktree) (plumbing.Hash, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit", arg0, arg1, arg2)
ret0, _ := ret[0].(plumbing.Hash)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Commit indicates an expected call of Commit.
func (mr *MockGoGitMockRecorder) Commit(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockGoGit)(nil).Commit), arg0, arg1, arg2)
}
// CommitObject mocks base method.
func (m *MockGoGit) CommitObject(arg0 *git.Repository, arg1 plumbing.Hash) (*object.Commit, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CommitObject", arg0, arg1)
ret0, _ := ret[0].(*object.Commit)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CommitObject indicates an expected call of CommitObject.
func (mr *MockGoGitMockRecorder) CommitObject(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitObject", reflect.TypeOf((*MockGoGit)(nil).CommitObject), arg0, arg1)
}
// Create mocks base method.
func (m *MockGoGit) Create(arg0 *git.Repository, arg1 string) (*git.Remote, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", arg0, arg1)
ret0, _ := ret[0].(*git.Remote)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Create indicates an expected call of Create.
func (mr *MockGoGitMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockGoGit)(nil).Create), arg0, arg1)
}
// CreateBranch mocks base method.
func (m *MockGoGit) CreateBranch(arg0 *git.Repository, arg1 *config.Branch) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateBranch", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateBranch indicates an expected call of CreateBranch.
func (mr *MockGoGitMockRecorder) CreateBranch(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBranch", reflect.TypeOf((*MockGoGit)(nil).CreateBranch), arg0, arg1)
}
// Head mocks base method.
func (m *MockGoGit) Head(arg0 *git.Repository) (*plumbing.Reference, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Head", arg0)
ret0, _ := ret[0].(*plumbing.Reference)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Head indicates an expected call of Head.
func (mr *MockGoGitMockRecorder) Head(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Head", reflect.TypeOf((*MockGoGit)(nil).Head), arg0)
}
// Init mocks base method.
func (m *MockGoGit) Init(arg0 string) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init", arg0)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Init indicates an expected call of Init.
func (mr *MockGoGitMockRecorder) Init(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockGoGit)(nil).Init), arg0)
}
// ListRemotes mocks base method.
func (m *MockGoGit) ListRemotes(arg0 *git.Repository, arg1 transport.AuthMethod) ([]*plumbing.Reference, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListRemotes", arg0, arg1)
ret0, _ := ret[0].([]*plumbing.Reference)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListRemotes indicates an expected call of ListRemotes.
func (mr *MockGoGitMockRecorder) ListRemotes(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRemotes", reflect.TypeOf((*MockGoGit)(nil).ListRemotes), arg0, arg1)
}
// ListWithContext mocks base method.
func (m *MockGoGit) ListWithContext(arg0 context.Context, arg1 *git.Remote, arg2 transport.AuthMethod) ([]*plumbing.Reference, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListWithContext", arg0, arg1, arg2)
ret0, _ := ret[0].([]*plumbing.Reference)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListWithContext indicates an expected call of ListWithContext.
func (mr *MockGoGitMockRecorder) ListWithContext(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWithContext", reflect.TypeOf((*MockGoGit)(nil).ListWithContext), arg0, arg1, arg2)
}
// NewRemote mocks base method.
func (m *MockGoGit) NewRemote(arg0, arg1 string) *git.Remote {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewRemote", arg0, arg1)
ret0, _ := ret[0].(*git.Remote)
return ret0
}
// NewRemote indicates an expected call of NewRemote.
func (mr *MockGoGitMockRecorder) NewRemote(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemote", reflect.TypeOf((*MockGoGit)(nil).NewRemote), arg0, arg1)
}
// OpenDir mocks base method.
func (m *MockGoGit) OpenDir(arg0 string) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "OpenDir", arg0)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// OpenDir indicates an expected call of OpenDir.
func (mr *MockGoGitMockRecorder) OpenDir(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenDir", reflect.TypeOf((*MockGoGit)(nil).OpenDir), arg0)
}
// OpenWorktree mocks base method.
func (m *MockGoGit) OpenWorktree(arg0 *git.Repository) (*git.Worktree, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "OpenWorktree", arg0)
ret0, _ := ret[0].(*git.Worktree)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// OpenWorktree indicates an expected call of OpenWorktree.
func (mr *MockGoGitMockRecorder) OpenWorktree(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenWorktree", reflect.TypeOf((*MockGoGit)(nil).OpenWorktree), arg0)
}
// PullWithContext mocks base method.
func (m *MockGoGit) PullWithContext(arg0 context.Context, arg1 *git.Worktree, arg2 transport.AuthMethod, arg3 plumbing.ReferenceName) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullWithContext", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// PullWithContext indicates an expected call of PullWithContext.
func (mr *MockGoGitMockRecorder) PullWithContext(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullWithContext", reflect.TypeOf((*MockGoGit)(nil).PullWithContext), arg0, arg1, arg2, arg3)
}
// PushWithContext mocks base method.
func (m *MockGoGit) PushWithContext(arg0 context.Context, arg1 *git.Repository, arg2 transport.AuthMethod) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushWithContext", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PushWithContext indicates an expected call of PushWithContext.
func (mr *MockGoGitMockRecorder) PushWithContext(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushWithContext", reflect.TypeOf((*MockGoGit)(nil).PushWithContext), arg0, arg1, arg2)
}
// Remove mocks base method.
func (m *MockGoGit) Remove(arg0 string, arg1 *git.Worktree) (plumbing.Hash, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Remove", arg0, arg1)
ret0, _ := ret[0].(plumbing.Hash)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Remove indicates an expected call of Remove.
func (mr *MockGoGitMockRecorder) Remove(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockGoGit)(nil).Remove), arg0, arg1)
}
// SetRepositoryReference mocks base method.
func (m *MockGoGit) SetRepositoryReference(arg0 *git.Repository, arg1 *plumbing.Reference) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetRepositoryReference", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// SetRepositoryReference indicates an expected call of SetRepositoryReference.
func (mr *MockGoGitMockRecorder) SetRepositoryReference(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRepositoryReference", reflect.TypeOf((*MockGoGit)(nil).SetRepositoryReference), arg0, arg1)
}
| 304 |
eks-anywhere | aws | Go | package gogithub
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
"time"
goGithub "github.com/google/go-github/v35/github"
"golang.org/x/oauth2"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type GoGithub struct {
Opts Options
Client Client
}
type Options struct {
Auth git.TokenAuth
}
func New(ctx context.Context, opts Options) *GoGithub {
return &GoGithub{
Opts: opts,
Client: newClient(ctx, opts),
}
}
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
type Client interface {
CreateRepo(ctx context.Context, org string, repo *goGithub.Repository) (*goGithub.Repository, *goGithub.Response, error)
AddDeployKeyToRepo(ctx context.Context, owner, repo string, key *goGithub.Key) error
Repo(ctx context.Context, owner, repo string) (*goGithub.Repository, *goGithub.Response, error)
User(ctx context.Context, user string) (*goGithub.User, *goGithub.Response, error)
Organization(ctx context.Context, org string) (*goGithub.Organization, *goGithub.Response, error)
GetContents(ctx context.Context, owner, repo, path string, opt *goGithub.RepositoryContentGetOptions) (
fileContent *goGithub.RepositoryContent, directoryContent []*goGithub.RepositoryContent, resp *goGithub.Response, err error,
)
DeleteRepo(ctx context.Context, owner, repo string) (*goGithub.Response, error)
}
type githubClient struct {
client *goGithub.Client
}
var HttpClient HTTPClient
func init() {
HttpClient = &http.Client{}
}
func (ggc *githubClient) CreateRepo(ctx context.Context, org string, repo *goGithub.Repository) (*goGithub.Repository, *goGithub.Response, error) {
return ggc.client.Repositories.Create(ctx, org, repo)
}
func (ggc *githubClient) Repo(ctx context.Context, owner, repo string) (*goGithub.Repository, *goGithub.Response, error) {
return ggc.client.Repositories.Get(ctx, owner, repo)
}
func (ggc *githubClient) User(ctx context.Context, user string) (*goGithub.User, *goGithub.Response, error) {
return ggc.client.Users.Get(ctx, user)
}
func (ggc *githubClient) Organization(ctx context.Context, org string) (*goGithub.Organization, *goGithub.Response, error) {
return ggc.client.Organizations.Get(ctx, org)
}
func (ggc *githubClient) GetContents(ctx context.Context, owner, repo, path string, opt *goGithub.RepositoryContentGetOptions) (fileContent *goGithub.RepositoryContent, directoryContent []*goGithub.RepositoryContent, resp *goGithub.Response, err error) {
return ggc.client.Repositories.GetContents(ctx, owner, repo, path, opt)
}
func (ggc *githubClient) DeleteRepo(ctx context.Context, owner, repo string) (*goGithub.Response, error) {
return ggc.client.Repositories.Delete(ctx, owner, repo)
}
func (ggc *githubClient) AddDeployKeyToRepo(ctx context.Context, owner, repo string, key *goGithub.Key) error {
_, resp, err := ggc.client.Repositories.CreateKey(ctx, owner, repo, key)
if err != nil {
logger.Info("createKey response", "resp", resp)
return fmt.Errorf("adding deploy key to repo: %v", err)
}
return err
}
// CreateRepo creates an empty Github Repository. The repository must be initialized locally or
// file must be added to it via the github api before it can be successfully cloned.
func (g *GoGithub) CreateRepo(ctx context.Context, opts git.CreateRepoOpts) (repository *git.Repository, err error) {
logger.V(3).Info("Attempting to create new Github repo", "repo", opts.Name, "owner", opts.Owner)
r := &goGithub.Repository{
Name: &opts.Name,
Private: &opts.Privacy,
Description: &opts.Description,
AutoInit: &opts.AutoInit,
}
org := ""
if !opts.Personal {
org = opts.Owner
logger.V(4).Info("Not a personal repository; using repository Owner as Org", "org", org, "owner", opts.Owner)
}
repo, _, err := g.Client.CreateRepo(ctx, org, r)
if err != nil {
return nil, fmt.Errorf("failed to create new Github repo %s: %v", opts.Name, err)
}
logger.V(3).Info("Successfully created new Github repo", "repo", repo.GetName(), "owner", opts.Owner)
return &git.Repository{
Name: repo.GetName(),
CloneUrl: repo.GetCloneURL(),
Owner: repo.GetOwner().GetName(),
Organization: repo.GetOrganization().GetName(),
}, err
}
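// GetAccessTokenPermissions issues a HEAD request to the GitHub API with the
// given token and returns the OAuth scopes reported in the X-Oauth-Scopes
// response header.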
func (g *GoGithub) GetAccessTokenPermissions(accessToken string) (string, error) {
req, err := http.NewRequest("HEAD", "https://api.github.com/users/codertocat", nil)
if err != nil {
return "", err
}
req.Header.Set("Authorization", "token "+accessToken)
var resp *http.Response
r := retrier.New(3 * time.Minute)
err = r.Retry(func() error {
resp, err = HttpClient.Do(req)
if err != nil {
return fmt.Errorf("getting Github Personal Access Token permissions %v", err)
}
return nil
})
if err != nil {
// Guard against a nil response: without this check an exhausted retry would panic below.
return "", err
}
defer resp.Body.Close()
permissionsScopes := resp.Header.Get("X-Oauth-Scopes")
return permissionsScopes, nil
}
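// CheckAccessTokenPermissions returns an error unless checkPATPermission appears
// in the comma-separated scope list produced by GetAccessTokenPermissions.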
func (g *GoGithub) CheckAccessTokenPermissions(checkPATPermission string, allPermissionScopes string) error {
logger.Info("Checking Github Access Token permissions")
allPermissions := strings.Split(allPermissionScopes, ", ")
for _, permission := range allPermissions {
if permission == checkPATPermission {
return nil
}
}
return errors.New("github access token does not have repo permissions")
}
// GetRepo describes a remote repository and returns it if it exists.
// If the repository does not exist, indicated by a 404 response, it returns a RepositoryDoesNotExistError.
func (g *GoGithub) GetRepo(ctx context.Context, opts git.GetRepoOpts) (*git.Repository, error) {
r := opts.Repository
o := opts.Owner
logger.V(3).Info("Describing Github repository", "name", r, "owner", o)
repo, _, err := g.Client.Repo(ctx, o, r)
if err != nil {
if isNotFound(err) {
return nil, &git.RepositoryDoesNotExistError{Err: err}
}
return nil, fmt.Errorf("unexpected error when describing repository %s: %w", r, err)
}
return &git.Repository{
Name: repo.GetName(),
CloneUrl: repo.GetCloneURL(),
Owner: repo.GetOwner().GetName(),
Organization: repo.GetOrganization().GetName(),
}, err
}
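// AuthenticatedUser returns the GitHub user associated with the client's access token.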
func (g *GoGithub) AuthenticatedUser(ctx context.Context) (*goGithub.User, error) {
githubUser, _, err := g.Client.User(ctx, "") // passing the empty string fetches the authenticated user
if err != nil {
return nil, fmt.Errorf("getting the authenticated github user: %v", err)
}
}
return githubUser, nil
}
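// Organization fetches the details of the named GitHub organization.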
func (g *GoGithub) Organization(ctx context.Context, org string) (*goGithub.Organization, error) {
organization, _, err := g.Client.Organization(ctx, org)
if err != nil {
return nil, fmt.Errorf("failed while getting github organization %s details %v", org, err)
}
return organization, nil
}
// PathExists checks if a path exists in the remote repository. If the owner, repository or branch doesn't exist,
// it returns false and no error.
func (g *GoGithub) PathExists(ctx context.Context, owner, repo, branch, path string) (bool, error) {
_, _, _, err := g.Client.GetContents(
ctx,
owner,
repo,
path,
&goGithub.RepositoryContentGetOptions{Ref: branch},
)
if isNotFound(err) {
return false, nil
}
if err != nil {
return false, fmt.Errorf("failed checking if path %s exists in remote github repository: %v", path, err)
}
return true, nil
}
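// AddDeployKeyToRepo registers the given public key as a deploy key on the repository.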
func (g *GoGithub) AddDeployKeyToRepo(ctx context.Context, opts git.AddDeployKeyOpts) error {
logger.V(3).Info("Adding deploy key to repository", "repository", opts.Repository, "owner", opts.Owner)
k := &goGithub.Key{
Key: &opts.Key,
Title: &opts.Title,
ReadOnly: &opts.ReadOnly,
}
return g.Client.AddDeployKeyToRepo(ctx, opts.Owner, opts.Repository, k)
}
// DeleteRepo deletes a Github repository.
func (g *GoGithub) DeleteRepo(ctx context.Context, opts git.DeleteRepoOpts) error {
r := opts.Repository
o := opts.Owner
logger.V(3).Info("Deleting Github repository", "name", r, "owner", o)
_, err := g.Client.DeleteRepo(ctx, o, r)
if err != nil {
return fmt.Errorf("deleting repository %s: %v", r, err)
}
return nil
}
func newClient(ctx context.Context, opts Options) Client {
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: opts.Auth.Token})
tc := oauth2.NewClient(ctx, ts)
return &githubClient{goGithub.NewClient(tc)}
}
func isNotFound(err error) bool {
var e *goGithub.ErrorResponse
return errors.As(err, &e) && e.Response.StatusCode == http.StatusNotFound
}
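// exampleRepoExists is a hypothetical sketch, not part of the original package:
// it shows how a caller might use GetRepo to test for existence, treating
// RepositoryDoesNotExistError as a non-fatal outcome.
func exampleRepoExists(ctx context.Context, g *GoGithub, opts git.GetRepoOpts) (bool, error) {
if _, err := g.GetRepo(ctx, opts); err != nil {
var notFound *git.RepositoryDoesNotExistError
if errors.As(err, &notFound) {
return false, nil
}
return false, err
}
return true, nil
}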
| 250 |
eks-anywhere | aws | Go | package gogithub_test
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"reflect"
"testing"
"github.com/golang/mock/gomock"
"github.com/google/go-github/v35/github"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/git/gogithub"
mockGoGithub "github.com/aws/eks-anywhere/pkg/git/gogithub/mocks"
)
const repoPermissions = "repo"
func TestGoGithubCreateRepo(t *testing.T) {
type fields struct {
opts gogithub.Options
}
type args struct {
opts git.CreateRepoOpts
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
name: "create public repo with organizational owner",
fields: fields{
opts: gogithub.Options{},
},
args: args{
opts: git.CreateRepoOpts{
Name: "testrepo",
Description: "unit test repo",
Owner: "testorganization",
Personal: false,
},
},
},
{
name: "create personal repo",
fields: fields{
opts: gogithub.Options{},
},
args: args{
opts: git.CreateRepoOpts{
Name: "testrepo",
Description: "unit test repo",
Owner: "testuser",
Personal: true,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(mockCtrl)
returnRepo := &github.Repository{
Name: &tt.args.opts.Name,
CloneURL: &tt.args.opts.Description,
Owner: &github.User{
Name: &tt.args.opts.Owner,
},
Organization: &github.Organization{
Name: &tt.args.opts.Name,
},
}
client.EXPECT().CreateRepo(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(arg0 context.Context, org string, arg2 *github.Repository) {
returnRepo.Organization.Name = &org
}).Return(
returnRepo, nil, nil)
g := &gogithub.GoGithub{
Opts: tt.fields.opts,
Client: client,
}
gotRepository, err := g.CreateRepo(ctx, tt.args.opts)
if (err != nil) != tt.wantErr {
t.Errorf("CreateRepo() error = %v, wantErr %v", err, tt.wantErr)
return
}
wantRepository := &git.Repository{
Name: returnRepo.GetName(),
Organization: returnRepo.GetOrganization().GetName(),
CloneUrl: returnRepo.GetCloneURL(),
Owner: returnRepo.GetOwner().GetName(),
}
if tt.args.opts.Personal && gotRepository.Organization != "" {
t.Errorf("for personal account org should be empty")
}
if !reflect.DeepEqual(gotRepository, wantRepository) {
t.Errorf("CreateRepo() gotRepository = %v, want %v", gotRepository, wantRepository)
}
})
}
}
func TestGoGithubGetRepo(t *testing.T) {
type fields struct {
opts gogithub.Options
}
type args struct {
opts git.GetRepoOpts
name string
cloneURL string
orgName string
}
tests := []struct {
name string
fields fields
args args
wantErr bool
throwError error
matchError error
}{
{
name: "Repo no error",
args: args{
opts: git.GetRepoOpts{
Owner: "owner1",
Repository: "repo1",
},
cloneURL: "url1",
name: "repo1",
orgName: "org1",
},
wantErr: false,
},
{
name: "github client threw generic error",
args: args{
opts: git.GetRepoOpts{
Owner: "owner1",
Repository: "repo1",
},
cloneURL: "url1",
name: "repo1",
orgName: "org1",
},
wantErr: true,
throwError: fmt.Errorf("github client threw error"),
matchError: fmt.Errorf("unexpected error when describing repository %s: %w", "repo1", fmt.Errorf("github client threw error")),
},
{
name: "github threw 404 error",
args: args{
opts: git.GetRepoOpts{
Owner: "owner1",
Repository: "repo1",
},
cloneURL: "url1",
name: "repo1",
orgName: "org1",
},
wantErr: true,
throwError: notFoundError(),
matchError: &git.RepositoryDoesNotExistError{Err: fmt.Errorf("GET : 404 Not found []")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(mockCtrl)
returnRepo := &github.Repository{
Name: &tt.args.name,
CloneURL: &tt.args.cloneURL,
Owner: &github.User{
Name: &tt.args.opts.Owner,
},
Organization: &github.Organization{
Name: &tt.args.orgName,
},
}
client.EXPECT().Repo(ctx, tt.args.opts.Owner, tt.args.opts.Repository).
Return(returnRepo, nil, tt.throwError)
g := &gogithub.GoGithub{
Opts: tt.fields.opts,
Client: client,
}
got, err := g.GetRepo(ctx, tt.args.opts)
wantRepository := &git.Repository{
Name: tt.args.name,
Organization: tt.args.orgName,
CloneUrl: tt.args.cloneURL,
Owner: tt.args.opts.Owner,
}
if (err != nil) != tt.wantErr {
t.Errorf("Repo() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
switch tt.matchError.(type) {
case *git.RepositoryDoesNotExistError:
_, typeMatches := err.(*git.RepositoryDoesNotExistError)
if !typeMatches || err.Error() != tt.matchError.Error() {
t.Errorf("Repo() error = %v, matchError %v", err, tt.matchError)
}
default:
if !reflect.DeepEqual(err, tt.matchError) {
t.Errorf("Repo() error = %v, matchError %v", err, tt.matchError)
}
}
}
if !tt.wantErr && !reflect.DeepEqual(got, wantRepository) {
t.Errorf("Repo() got = %v, want %v", got, wantRepository)
}
})
}
}
func TestGoGithubDeleteRepoSuccess(t *testing.T) {
type fields struct {
opts gogithub.Options
}
tests := []struct {
name string
fields fields
args git.DeleteRepoOpts
wantErr error
}{
{
name: "github repo deleted successfully",
args: git.DeleteRepoOpts{
Owner: "owner1",
Repository: "repo1",
},
wantErr: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(mockCtrl)
client.EXPECT().DeleteRepo(ctx, tt.args.Owner, tt.args.Repository).Return(nil, tt.wantErr)
g := &gogithub.GoGithub{
Opts: tt.fields.opts,
Client: client,
}
err := g.DeleteRepo(ctx, tt.args)
if err != tt.wantErr {
t.Errorf("DeleteRepo() got error: %v want error: %v", err, tt.wantErr)
}
})
}
}
func TestGoGithubDeleteRepoFail(t *testing.T) {
type fields struct {
opts gogithub.Options
}
tests := []struct {
name string
fields fields
args git.DeleteRepoOpts
wantErr error
throwErr error
}{
{
name: "github repo delete fail",
args: git.DeleteRepoOpts{
Owner: "owner1",
Repository: "repo1",
},
wantErr: fmt.Errorf("deleting repository repo1: github client threw error"),
throwErr: fmt.Errorf("github client threw error"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(mockCtrl)
client.EXPECT().DeleteRepo(ctx, tt.args.Owner, tt.args.Repository).Return(nil, tt.throwErr)
g := &gogithub.GoGithub{
Opts: tt.fields.opts,
Client: client,
}
err := g.DeleteRepo(ctx, tt.args)
if !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("DeleteRepo() got error: %v want error: %v", err, tt.wantErr)
}
})
}
}
func TestGoGithubAddDeployKeySuccess(t *testing.T) {
type fields struct {
opts gogithub.Options
}
tests := []struct {
name string
fields fields
args git.AddDeployKeyOpts
wantErr error
}{
{
name: "github repo deleted successfully",
args: git.AddDeployKeyOpts{
Owner: "owner1",
Repository: "repo1",
Key: "KEY STRING YO",
Title: "My test key",
ReadOnly: false,
},
wantErr: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(mockCtrl)
k := &github.Key{
Key: &tt.args.Key,
Title: &tt.args.Title,
ReadOnly: &tt.args.ReadOnly,
}
client.EXPECT().AddDeployKeyToRepo(ctx, tt.args.Owner, tt.args.Repository, k).Return(tt.wantErr)
g := &gogithub.GoGithub{
Opts: tt.fields.opts,
Client: client,
}
err := g.AddDeployKeyToRepo(ctx, tt.args)
if err != tt.wantErr {
t.Errorf("AddDeployKeyToRepo() got error: %v want error: %v", err, tt.wantErr)
}
})
}
}
func TestGoGithub_CheckAccessTokenPermissions(t *testing.T) {
type fields struct {
opts gogithub.Options
}
tests := []struct {
name string
allPermissions string
fields fields
wantErr error
}{
{
name: "token with repo permissions",
allPermissions: "admin, repo",
fields: fields{
opts: gogithub.Options{},
},
wantErr: nil,
},
{
name: "token without repo permissions",
allPermissions: "admin, workflow",
fields: fields{
opts: gogithub.Options{},
},
wantErr: errors.New("github access token does not have repo permissions"),
},
{
name: "token with repo permissions",
allPermissions: "",
fields: fields{
opts: gogithub.Options{},
},
wantErr: errors.New("github access token does not have repo permissions"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(mockCtrl)
g := &gogithub.GoGithub{
Opts: tt.fields.opts,
Client: client,
}
gotError := g.CheckAccessTokenPermissions(repoPermissions, tt.allPermissions)
if !reflect.DeepEqual(gotError, tt.wantErr) {
t.Errorf("Test %v\n got %v\n want %v", tt.name, gotError, tt.wantErr)
}
})
}
}
func TestPathExistsError(t *testing.T) {
tt := newTest(t)
owner, repo, branch, path := pathArgs()
tt.client.EXPECT().GetContents(
tt.ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch},
).Return(nil, nil, nil, errors.New("can't get content"))
_, err := tt.g.PathExists(tt.ctx, owner, repo, branch, path)
tt.Expect(err).To(HaveOccurred())
}
func TestPathExistsItDoesNot(t *testing.T) {
tt := newTest(t)
owner, repo, branch, path := pathArgs()
tt.client.EXPECT().GetContents(
tt.ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch},
).Return(nil, nil, nil, notFoundError())
tt.Expect(tt.g.PathExists(tt.ctx, owner, repo, branch, path)).To(BeFalse())
}
func TestPathExistsItDoes(t *testing.T) {
tt := newTest(t)
owner, repo, branch, path := pathArgs()
tt.client.EXPECT().GetContents(
tt.ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch},
).Return(nil, nil, nil, nil)
tt.Expect(tt.g.PathExists(tt.ctx, owner, repo, branch, path)).To(BeTrue())
}
type gogithubTest struct {
*WithT
g *gogithub.GoGithub
opts gogithub.Options
client *mockGoGithub.MockClient
ctx context.Context
}
func newTest(t *testing.T) *gogithubTest {
withT := NewWithT(t)
ctx := context.Background()
ctrl := gomock.NewController(t)
client := mockGoGithub.NewMockClient(ctrl)
opts := gogithub.Options{
Auth: git.TokenAuth{
Username: "user",
Token: "token",
},
}
g := &gogithub.GoGithub{
Opts: opts,
Client: client,
}
return &gogithubTest{
WithT: withT,
g: g,
opts: opts,
client: client,
ctx: ctx,
}
}
func pathArgs() (owner, repo, branch, path string) {
path = "fluxFolder"
branch = "main"
owner = "aws"
repo = "eksa-gitops"
	return owner, repo, branch, path
}
func notFoundError() error {
return &github.ErrorResponse{
Message: "Not found",
Response: &http.Response{
StatusCode: http.StatusNotFound,
Request: &http.Request{
Method: "GET",
URL: &url.URL{},
},
},
}
}
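// For context, a minimal sketch (not this package's actual implementation) of how a
// PathExists-style call can classify a GetContents failure: the GitHub API reports a
// missing path as an HTTP 404, which should map to "does not exist" rather than to an
// error. The helper name isNotFound is illustrative only.
func isNotFound(err error) bool {
	var ghErr *github.ErrorResponse
	return errors.As(err, &ghErr) && ghErr.Response != nil && ghErr.Response.StatusCode == http.StatusNotFound
}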
| 503 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/git/gogithub (interfaces: Client)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
github "github.com/google/go-github/v35/github"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// AddDeployKeyToRepo mocks base method.
func (m *MockClient) AddDeployKeyToRepo(arg0 context.Context, arg1, arg2 string, arg3 *github.Key) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDeployKeyToRepo", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// AddDeployKeyToRepo indicates an expected call of AddDeployKeyToRepo.
func (mr *MockClientMockRecorder) AddDeployKeyToRepo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeployKeyToRepo", reflect.TypeOf((*MockClient)(nil).AddDeployKeyToRepo), arg0, arg1, arg2, arg3)
}
// CreateRepo mocks base method.
func (m *MockClient) CreateRepo(arg0 context.Context, arg1 string, arg2 *github.Repository) (*github.Repository, *github.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRepo", arg0, arg1, arg2)
ret0, _ := ret[0].(*github.Repository)
ret1, _ := ret[1].(*github.Response)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// CreateRepo indicates an expected call of CreateRepo.
func (mr *MockClientMockRecorder) CreateRepo(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRepo", reflect.TypeOf((*MockClient)(nil).CreateRepo), arg0, arg1, arg2)
}
// DeleteRepo mocks base method.
func (m *MockClient) DeleteRepo(arg0 context.Context, arg1, arg2 string) (*github.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteRepo", arg0, arg1, arg2)
ret0, _ := ret[0].(*github.Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteRepo indicates an expected call of DeleteRepo.
func (mr *MockClientMockRecorder) DeleteRepo(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRepo", reflect.TypeOf((*MockClient)(nil).DeleteRepo), arg0, arg1, arg2)
}
// GetContents mocks base method.
func (m *MockClient) GetContents(arg0 context.Context, arg1, arg2, arg3 string, arg4 *github.RepositoryContentGetOptions) (*github.RepositoryContent, []*github.RepositoryContent, *github.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetContents", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*github.RepositoryContent)
ret1, _ := ret[1].([]*github.RepositoryContent)
ret2, _ := ret[2].(*github.Response)
ret3, _ := ret[3].(error)
return ret0, ret1, ret2, ret3
}
// GetContents indicates an expected call of GetContents.
func (mr *MockClientMockRecorder) GetContents(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContents", reflect.TypeOf((*MockClient)(nil).GetContents), arg0, arg1, arg2, arg3, arg4)
}
// Organization mocks base method.
func (m *MockClient) Organization(arg0 context.Context, arg1 string) (*github.Organization, *github.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Organization", arg0, arg1)
ret0, _ := ret[0].(*github.Organization)
ret1, _ := ret[1].(*github.Response)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// Organization indicates an expected call of Organization.
func (mr *MockClientMockRecorder) Organization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Organization", reflect.TypeOf((*MockClient)(nil).Organization), arg0, arg1)
}
// Repo mocks base method.
func (m *MockClient) Repo(arg0 context.Context, arg1, arg2 string) (*github.Repository, *github.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Repo", arg0, arg1, arg2)
ret0, _ := ret[0].(*github.Repository)
ret1, _ := ret[1].(*github.Response)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// Repo indicates an expected call of Repo.
func (mr *MockClientMockRecorder) Repo(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repo", reflect.TypeOf((*MockClient)(nil).Repo), arg0, arg1, arg2)
}
// User mocks base method.
func (m *MockClient) User(arg0 context.Context, arg1 string) (*github.User, *github.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "User", arg0, arg1)
ret0, _ := ret[0].(*github.User)
ret1, _ := ret[1].(*github.Response)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// User indicates an expected call of User.
func (mr *MockClientMockRecorder) User(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "User", reflect.TypeOf((*MockClient)(nil).User), arg0, arg1)
}
| 147 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/git (interfaces: Client,ProviderClient)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
git "github.com/aws/eks-anywhere/pkg/git"
gomock "github.com/golang/mock/gomock"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// Add mocks base method.
func (m *MockClient) Add(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Add", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Add indicates an expected call of Add.
func (mr *MockClientMockRecorder) Add(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockClient)(nil).Add), arg0)
}
// Branch mocks base method.
func (m *MockClient) Branch(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Branch", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Branch indicates an expected call of Branch.
func (mr *MockClientMockRecorder) Branch(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Branch", reflect.TypeOf((*MockClient)(nil).Branch), arg0)
}
// Clone mocks base method.
func (m *MockClient) Clone(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Clone", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Clone indicates an expected call of Clone.
func (mr *MockClientMockRecorder) Clone(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clone", reflect.TypeOf((*MockClient)(nil).Clone), arg0)
}
// Commit mocks base method.
func (m *MockClient) Commit(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Commit indicates an expected call of Commit.
func (mr *MockClientMockRecorder) Commit(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockClient)(nil).Commit), arg0)
}
// Init mocks base method.
func (m *MockClient) Init() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init")
ret0, _ := ret[0].(error)
return ret0
}
// Init indicates an expected call of Init.
func (mr *MockClientMockRecorder) Init() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockClient)(nil).Init))
}
// Pull mocks base method.
func (m *MockClient) Pull(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Pull", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Pull indicates an expected call of Pull.
func (mr *MockClientMockRecorder) Pull(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pull", reflect.TypeOf((*MockClient)(nil).Pull), arg0, arg1)
}
// Push mocks base method.
func (m *MockClient) Push(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Push", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Push indicates an expected call of Push.
func (mr *MockClientMockRecorder) Push(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockClient)(nil).Push), arg0)
}
// Remove mocks base method.
func (m *MockClient) Remove(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Remove", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Remove indicates an expected call of Remove.
func (mr *MockClientMockRecorder) Remove(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockClient)(nil).Remove), arg0)
}
// ValidateRemoteExists mocks base method.
func (m *MockClient) ValidateRemoteExists(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateRemoteExists", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateRemoteExists indicates an expected call of ValidateRemoteExists.
func (mr *MockClientMockRecorder) ValidateRemoteExists(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateRemoteExists", reflect.TypeOf((*MockClient)(nil).ValidateRemoteExists), arg0)
}
// MockProviderClient is a mock of ProviderClient interface.
type MockProviderClient struct {
ctrl *gomock.Controller
recorder *MockProviderClientMockRecorder
}
// MockProviderClientMockRecorder is the mock recorder for MockProviderClient.
type MockProviderClientMockRecorder struct {
mock *MockProviderClient
}
// NewMockProviderClient creates a new mock instance.
func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient {
mock := &MockProviderClient{ctrl: ctrl}
mock.recorder = &MockProviderClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder {
return m.recorder
}
// AddDeployKeyToRepo mocks base method.
func (m *MockProviderClient) AddDeployKeyToRepo(arg0 context.Context, arg1 git.AddDeployKeyOpts) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDeployKeyToRepo", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// AddDeployKeyToRepo indicates an expected call of AddDeployKeyToRepo.
func (mr *MockProviderClientMockRecorder) AddDeployKeyToRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeployKeyToRepo", reflect.TypeOf((*MockProviderClient)(nil).AddDeployKeyToRepo), arg0, arg1)
}
// CreateRepo mocks base method.
func (m *MockProviderClient) CreateRepo(arg0 context.Context, arg1 git.CreateRepoOpts) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRepo", arg0, arg1)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateRepo indicates an expected call of CreateRepo.
func (mr *MockProviderClientMockRecorder) CreateRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRepo", reflect.TypeOf((*MockProviderClient)(nil).CreateRepo), arg0, arg1)
}
// DeleteRepo mocks base method.
func (m *MockProviderClient) DeleteRepo(arg0 context.Context, arg1 git.DeleteRepoOpts) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteRepo", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteRepo indicates an expected call of DeleteRepo.
func (mr *MockProviderClientMockRecorder) DeleteRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRepo", reflect.TypeOf((*MockProviderClient)(nil).DeleteRepo), arg0, arg1)
}
// GetRepo mocks base method.
func (m *MockProviderClient) GetRepo(arg0 context.Context) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRepo", arg0)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetRepo indicates an expected call of GetRepo.
func (mr *MockProviderClientMockRecorder) GetRepo(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRepo", reflect.TypeOf((*MockProviderClient)(nil).GetRepo), arg0)
}
// PathExists mocks base method.
func (m *MockProviderClient) PathExists(arg0 context.Context, arg1, arg2, arg3, arg4 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PathExists", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PathExists indicates an expected call of PathExists.
func (mr *MockProviderClientMockRecorder) PathExists(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PathExists", reflect.TypeOf((*MockProviderClient)(nil).PathExists), arg0, arg1, arg2, arg3, arg4)
}
// Validate mocks base method.
func (m *MockProviderClient) Validate(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Validate", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Validate indicates an expected call of Validate.
func (mr *MockProviderClientMockRecorder) Validate(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockProviderClient)(nil).Validate), arg0)
}
| 273 |
eks-anywhere | aws | Go | package github
import (
"context"
"errors"
"fmt"
"os"
"regexp"
"strings"
goGithub "github.com/google/go-github/v35/github"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
GitProviderName = "github"
EksaGithubTokenEnv = "EKSA_GITHUB_TOKEN"
GithubTokenEnv = "GITHUB_TOKEN"
githubUrlTemplate = "https://github.com/%v/%v.git"
	patRegex           = "^(ghp_[a-zA-Z0-9]{36}|github_pat_[a-zA-Z0-9]{22}_[a-zA-Z0-9]{59})$"
repoPermissions = "repo"
)
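// For illustration, tokens matching patRegex take one of two shapes (the values below are
// fabricated, not real credentials):
//
//	ghp_<36 alphanumeric characters>          classic personal access token
//	github_pat_<22 chars>_<59 chars>          fine-grained personal access token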
type githubProvider struct {
githubProviderClient GithubClient
config *v1alpha1.GithubProviderConfig
auth git.TokenAuth
}
type Options struct {
Repository string
Owner string
Personal bool
}
// GithubClient represents the attributes that the Github provider requires of a library to directly connect to and interact with the Github API.
type GithubClient interface {
GetRepo(ctx context.Context, opts git.GetRepoOpts) (repo *git.Repository, err error)
CreateRepo(ctx context.Context, opts git.CreateRepoOpts) (repo *git.Repository, err error)
AddDeployKeyToRepo(ctx context.Context, opts git.AddDeployKeyOpts) error
AuthenticatedUser(ctx context.Context) (*goGithub.User, error)
Organization(ctx context.Context, org string) (*goGithub.Organization, error)
GetAccessTokenPermissions(accessToken string) (string, error)
CheckAccessTokenPermissions(checkPATPermission string, allPermissionScopes string) error
PathExists(ctx context.Context, owner, repo, branch, path string) (bool, error)
DeleteRepo(ctx context.Context, opts git.DeleteRepoOpts) error
}
func New(githubProviderClient GithubClient, config *v1alpha1.GithubProviderConfig, auth git.TokenAuth) (*githubProvider, error) {
return &githubProvider{
githubProviderClient: githubProviderClient,
config: config,
auth: auth,
}, nil
}
// CreateRepo creates an empty Github Repository. The repository must be initialized locally,
// or a file must be added to it via the GitHub API, before it can be successfully cloned.
func (g *githubProvider) CreateRepo(ctx context.Context, opts git.CreateRepoOpts) (repository *git.Repository, err error) {
return g.githubProviderClient.CreateRepo(ctx, opts)
}
// GetRepo describes a remote repository, returning the repository if it exists.
// If the repo does not exist, a nil repo is returned.
func (g *githubProvider) GetRepo(ctx context.Context) (*git.Repository, error) {
r := g.config.Repository
o := g.config.Owner
logger.V(3).Info("Describing Github repository", "name", r, "owner", o)
opts := git.GetRepoOpts{Owner: o, Repository: r}
repo, err := g.githubProviderClient.GetRepo(ctx, opts)
if err != nil {
var e *git.RepositoryDoesNotExistError
if errors.As(err, &e) {
return nil, nil
}
return nil, fmt.Errorf("unexpected error when describing repository %s: %w", r, err)
}
return repo, err
}
func (g *githubProvider) AddDeployKeyToRepo(ctx context.Context, opts git.AddDeployKeyOpts) error {
return g.githubProviderClient.AddDeployKeyToRepo(ctx, opts)
}
// Validate validates the github setup and access.
func (g *githubProvider) Validate(ctx context.Context) error {
user, err := g.githubProviderClient.AuthenticatedUser(ctx)
if err != nil {
return err
}
accessToken := g.auth.Token
allPermissions, err := g.githubProviderClient.GetAccessTokenPermissions(accessToken)
if err != nil {
return err
}
err = g.githubProviderClient.CheckAccessTokenPermissions(repoPermissions, allPermissions)
if err != nil {
return err
}
logger.MarkPass("Github personal access token has the required repo permissions")
if g.config.Personal {
if !strings.EqualFold(g.config.Owner, *user.Login) {
return fmt.Errorf("the authenticated Github.com user and owner %s specified in the EKS-A gitops spec don't match; confirm access token owner is %s", g.config.Owner, g.config.Owner)
}
return nil
}
org, err := g.githubProviderClient.Organization(ctx, g.config.Owner)
if err != nil {
return fmt.Errorf("the authenticated github user doesn't have proper access to github organization %s, %v", g.config.Owner, err)
}
if org == nil { // for now only checks if user belongs to the org
return fmt.Errorf("the authenticated github user doesn't have proper access to github organization %s", g.config.Owner)
}
return nil
}
func validateGithubAccessToken() error {
r := regexp.MustCompile(patRegex)
logger.V(4).Info("Checking validity of Github Access Token environment variable", "env var", EksaGithubTokenEnv)
val, ok := os.LookupEnv(EksaGithubTokenEnv)
if !ok {
return fmt.Errorf("github access token environment variable %s is invalid; could not get var from environment", EksaGithubTokenEnv)
}
if !r.MatchString(val) {
return fmt.Errorf("github access token environment variable %s is invalid; must match format %s", EksaGithubTokenEnv, patRegex)
}
return nil
}
func GetGithubAccessTokenFromEnv() (string, error) {
	if err := validateGithubAccessToken(); err != nil {
		return "", err
	}
	// validateGithubAccessToken guarantees the variable is set and well formed.
	token := os.Getenv(EksaGithubTokenEnv)
	if err := os.Setenv(GithubTokenEnv, token); err != nil {
		return "", fmt.Errorf("unable to set %s: %v", GithubTokenEnv, err)
	}
	return token, nil
}
func (g *githubProvider) PathExists(ctx context.Context, owner, repo, branch, path string) (bool, error) {
return g.githubProviderClient.PathExists(ctx, owner, repo, branch, path)
}
func (g *githubProvider) DeleteRepo(ctx context.Context, opts git.DeleteRepoOpts) error {
return g.githubProviderClient.DeleteRepo(ctx, opts)
}
type GitProviderNotFoundError struct {
Provider string
}
func (e *GitProviderNotFoundError) Error() string {
return fmt.Sprintf("git provider %s not found", e.Provider)
}
func RepoUrl(owner string, repo string) string {
return fmt.Sprintf(githubUrlTemplate, owner, repo)
}
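// newProviderFromEnv is an illustrative sketch, not part of the package API: it shows how
// a caller might wire a githubProvider by resolving the access token from the environment
// and pairing it with a caller-supplied GithubClient implementation. The function name and
// parameters are hypothetical.
func newProviderFromEnv(client GithubClient, owner, repository string, personal bool) (*githubProvider, error) {
	token, err := GetGithubAccessTokenFromEnv()
	if err != nil {
		return nil, err
	}
	config := &v1alpha1.GithubProviderConfig{
		Owner:      owner,
		Repository: repository,
		Personal:   personal,
	}
	return New(client, config, git.TokenAuth{Username: owner, Token: token})
}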
| 168 |
eks-anywhere | aws | Go | package github_test
import (
"context"
"fmt"
"math/rand"
"reflect"
"testing"
"github.com/golang/mock/gomock"
goGithub "github.com/google/go-github/v35/github"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/git/providers/github"
"github.com/aws/eks-anywhere/pkg/git/providers/github/mocks"
)
var validPATValues = []string{"ghp_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", "github_pat_abcdefghijklmnopqrstuv_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456"}
func TestValidate(t *testing.T) {
tests := []struct {
testName string
owner string
repository string
personal bool
authenticatedUser string
allPATPermissions string
wantErr error
}{
{
testName: "good personal repo",
owner: "Jeff",
repository: "testRepo",
personal: true,
authenticatedUser: "Jeff",
allPATPermissions: "repo, notrepo, admin",
},
{
testName: "good organization repo",
owner: "orgA",
repository: "testRepo",
personal: false,
authenticatedUser: "Jeff",
allPATPermissions: "repo, notrepo, admin",
},
{
testName: "user specified wrong owner in spec for a personal repo",
owner: "nobody",
repository: "testRepo",
personal: true,
authenticatedUser: "Jeff",
allPATPermissions: "repo, notrepo, admin",
wantErr: fmt.Errorf("the authenticated Github.com user and owner %s specified in the EKS-A gitops spec don't match; confirm access token owner is %s", "nobody", "nobody"),
},
{
testName: "user doesn't belong to the organization or wrong organization",
owner: "hiddenOrg",
repository: "testRepo",
personal: false,
authenticatedUser: "Jeff",
allPATPermissions: "repo, notrepo, admin",
wantErr: fmt.Errorf("the authenticated github user doesn't have proper access to github organization %s", "hiddenOrg"),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
validPATValue := validPATValues[rand.Intn(len(validPATValues))]
ctx := context.Background()
githubproviderclient := mocks.NewMockGithubClient(mockCtrl)
authenticatedUser := &goGithub.User{Login: &tt.authenticatedUser}
githubproviderclient.EXPECT().AuthenticatedUser(ctx).Return(authenticatedUser, nil)
githubproviderclient.EXPECT().GetAccessTokenPermissions(validPATValue).Return(tt.allPATPermissions, nil)
githubproviderclient.EXPECT().CheckAccessTokenPermissions("repo", tt.allPATPermissions).Return(nil)
auth := git.TokenAuth{Token: validPATValue, Username: tt.owner}
config := &v1alpha1.GithubProviderConfig{
Owner: tt.owner,
Repository: tt.repository,
Personal: tt.personal,
}
githubProvider, err := github.New(githubproviderclient, config, auth)
if err != nil {
t.Errorf("instantiating github provider: %v, wanted nil", err)
}
if !tt.personal {
if tt.wantErr == nil {
githubproviderclient.EXPECT().Organization(ctx, tt.owner).Return(&goGithub.Organization{Login: &tt.owner}, nil)
} else {
githubproviderclient.EXPECT().Organization(ctx, tt.owner).Return(nil, nil)
}
}
err = githubProvider.Validate(ctx)
if !reflect.DeepEqual(tt.wantErr, err) {
t.Errorf("%v got = %v, want %v", tt.testName, err, tt.wantErr)
}
})
}
}
func setupContext(t *testing.T) {
validPATValue := validPATValues[rand.Intn(len(validPATValues))]
t.Setenv(github.EksaGithubTokenEnv, validPATValue)
t.Setenv(github.GithubTokenEnv, validPATValue)
}
func TestIsGithubAccessTokenValidWithEnv(t *testing.T) {
setupContext(t)
tests := []struct {
testName string
}{
{
testName: "no token path",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
_, err := github.GetGithubAccessTokenFromEnv()
if err != nil {
t.Errorf("github.GetGithubAccessTokenFromEnv returned an error, wanted none; %s", err)
}
})
}
}
func TestGetRepoSucceeds(t *testing.T) {
tests := []struct {
testName string
owner string
repository string
gitProvider string
personal bool
}{
{
testName: "personal repo succeeds",
owner: "Jeff",
repository: "testRepo",
gitProvider: github.GitProviderName,
personal: true,
},
{
testName: "organizational repo succeeds",
owner: "Jeff",
repository: "testRepo",
gitProvider: github.GitProviderName,
personal: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
githubproviderclient := mocks.NewMockGithubClient(mockCtrl)
getRepoOpts := git.GetRepoOpts{Owner: tt.owner, Repository: tt.repository}
testRepo := &git.Repository{Name: tt.repository, Owner: tt.owner, Organization: "", CloneUrl: "https://github.com/user/repo"}
githubproviderclient.EXPECT().GetRepo(context.Background(), getRepoOpts).Return(testRepo, nil)
config := &v1alpha1.GithubProviderConfig{
Owner: tt.owner,
Repository: tt.repository,
Personal: tt.personal,
}
validPATValue := validPATValues[rand.Intn(len(validPATValues))]
auth := git.TokenAuth{Token: validPATValue, Username: tt.owner}
githubProvider, err := github.New(githubproviderclient, config, auth)
if err != nil {
t.Errorf("instantiating github provider: %v, wanted nil", err)
}
repo, err := githubProvider.GetRepo(context.Background())
if err != nil {
t.Errorf("calling Repo %v, wanted nil", err)
}
assert.Equal(t, testRepo, repo)
})
}
}
func TestGetNonExistentRepoSucceeds(t *testing.T) {
tests := []struct {
testName string
owner string
repository string
authTokenPath string
gitProvider string
personal bool
}{
{
testName: "personal repo succeeds",
owner: "Jeff",
repository: "testRepo",
authTokenPath: "",
gitProvider: github.GitProviderName,
personal: true,
},
{
testName: "organizational repo succeeds",
owner: "Jeff",
repository: "testRepo",
authTokenPath: "",
gitProvider: github.GitProviderName,
personal: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
githubproviderclient := mocks.NewMockGithubClient(mockCtrl)
getRepoOpts := git.GetRepoOpts{Owner: tt.owner, Repository: tt.repository}
githubproviderclient.EXPECT().GetRepo(context.Background(), getRepoOpts).Return(nil, &git.RepositoryDoesNotExistError{})
config := &v1alpha1.GithubProviderConfig{
Owner: tt.owner,
Repository: tt.repository,
Personal: tt.personal,
}
validPATValue := validPATValues[rand.Intn(len(validPATValues))]
auth := git.TokenAuth{Token: validPATValue, Username: tt.owner}
githubProvider, err := github.New(githubproviderclient, config, auth)
if err != nil {
t.Errorf("instantiating github provider: %v, wanted nil", err)
}
repo, err := githubProvider.GetRepo(context.Background())
if err != nil {
t.Errorf("calling Repo %v, wanted nil", err)
}
var nilRepo *git.Repository
assert.Equal(t, nilRepo, repo)
})
}
}
| 243 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/git/providers/github (interfaces: GithubClient)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
git "github.com/aws/eks-anywhere/pkg/git"
gomock "github.com/golang/mock/gomock"
github "github.com/google/go-github/v35/github"
)
// MockGithubClient is a mock of GithubClient interface.
type MockGithubClient struct {
ctrl *gomock.Controller
recorder *MockGithubClientMockRecorder
}
// MockGithubClientMockRecorder is the mock recorder for MockGithubClient.
type MockGithubClientMockRecorder struct {
mock *MockGithubClient
}
// NewMockGithubClient creates a new mock instance.
func NewMockGithubClient(ctrl *gomock.Controller) *MockGithubClient {
mock := &MockGithubClient{ctrl: ctrl}
mock.recorder = &MockGithubClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGithubClient) EXPECT() *MockGithubClientMockRecorder {
return m.recorder
}
// AddDeployKeyToRepo mocks base method.
func (m *MockGithubClient) AddDeployKeyToRepo(arg0 context.Context, arg1 git.AddDeployKeyOpts) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDeployKeyToRepo", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// AddDeployKeyToRepo indicates an expected call of AddDeployKeyToRepo.
func (mr *MockGithubClientMockRecorder) AddDeployKeyToRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeployKeyToRepo", reflect.TypeOf((*MockGithubClient)(nil).AddDeployKeyToRepo), arg0, arg1)
}
// AuthenticatedUser mocks base method.
func (m *MockGithubClient) AuthenticatedUser(arg0 context.Context) (*github.User, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AuthenticatedUser", arg0)
ret0, _ := ret[0].(*github.User)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AuthenticatedUser indicates an expected call of AuthenticatedUser.
func (mr *MockGithubClientMockRecorder) AuthenticatedUser(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthenticatedUser", reflect.TypeOf((*MockGithubClient)(nil).AuthenticatedUser), arg0)
}
// CheckAccessTokenPermissions mocks base method.
func (m *MockGithubClient) CheckAccessTokenPermissions(arg0, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckAccessTokenPermissions", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CheckAccessTokenPermissions indicates an expected call of CheckAccessTokenPermissions.
func (mr *MockGithubClientMockRecorder) CheckAccessTokenPermissions(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckAccessTokenPermissions", reflect.TypeOf((*MockGithubClient)(nil).CheckAccessTokenPermissions), arg0, arg1)
}
// CreateRepo mocks base method.
func (m *MockGithubClient) CreateRepo(arg0 context.Context, arg1 git.CreateRepoOpts) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRepo", arg0, arg1)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateRepo indicates an expected call of CreateRepo.
func (mr *MockGithubClientMockRecorder) CreateRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRepo", reflect.TypeOf((*MockGithubClient)(nil).CreateRepo), arg0, arg1)
}
// DeleteRepo mocks base method.
func (m *MockGithubClient) DeleteRepo(arg0 context.Context, arg1 git.DeleteRepoOpts) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteRepo", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteRepo indicates an expected call of DeleteRepo.
func (mr *MockGithubClientMockRecorder) DeleteRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRepo", reflect.TypeOf((*MockGithubClient)(nil).DeleteRepo), arg0, arg1)
}
// GetAccessTokenPermissions mocks base method.
func (m *MockGithubClient) GetAccessTokenPermissions(arg0 string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAccessTokenPermissions", arg0)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetAccessTokenPermissions indicates an expected call of GetAccessTokenPermissions.
func (mr *MockGithubClientMockRecorder) GetAccessTokenPermissions(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccessTokenPermissions", reflect.TypeOf((*MockGithubClient)(nil).GetAccessTokenPermissions), arg0)
}
// GetRepo mocks base method.
func (m *MockGithubClient) GetRepo(arg0 context.Context, arg1 git.GetRepoOpts) (*git.Repository, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRepo", arg0, arg1)
ret0, _ := ret[0].(*git.Repository)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetRepo indicates an expected call of GetRepo.
func (mr *MockGithubClientMockRecorder) GetRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRepo", reflect.TypeOf((*MockGithubClient)(nil).GetRepo), arg0, arg1)
}
// Organization mocks base method.
func (m *MockGithubClient) Organization(arg0 context.Context, arg1 string) (*github.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Organization", arg0, arg1)
ret0, _ := ret[0].(*github.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Organization indicates an expected call of Organization.
func (mr *MockGithubClientMockRecorder) Organization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Organization", reflect.TypeOf((*MockGithubClient)(nil).Organization), arg0, arg1)
}
// PathExists mocks base method.
func (m *MockGithubClient) PathExists(arg0 context.Context, arg1, arg2, arg3, arg4 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PathExists", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PathExists indicates an expected call of PathExists.
func (mr *MockGithubClientMockRecorder) PathExists(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PathExists", reflect.TypeOf((*MockGithubClient)(nil).PathExists), arg0, arg1, arg2, arg3, arg4)
}
| 170 |
eks-anywhere | aws | Go | package flux
import (
"context"
"strconv"
"time"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
maxRetries = 5
backOffPeriod = 5 * time.Second
reconcileAnnotation = "kustomize.toolkit.fluxcd.io/reconcile"
)
// FluxClient is an interface that abstracts the basic commands of the flux executable.
type FluxClient interface {
BootstrapGithub(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error
BootstrapGit(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig, cliConfig *config.CliConfig) error
Uninstall(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error
Reconcile(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error
}
// KubeClient is an interface that abstracts the basic commands of the kubectl executable.
type KubeClient interface {
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error
RemoveAnnotation(ctx context.Context, resourceType, objectName string, key string, opts ...executables.KubectlOpt) error
DeleteSecret(ctx context.Context, managementCluster *types.Cluster, secretName, namespace string) error
}
type fluxClient struct {
flux FluxClient
kube KubeClient
*retrier.Retrier
}
func newFluxClient(flux FluxClient, kube KubeClient) *fluxClient {
return &fluxClient{
flux: flux,
kube: kube,
Retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
}
}
func (c *fluxClient) BootstrapGithub(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
return c.Retry(
func() error {
return c.flux.BootstrapGithub(ctx, cluster, fluxConfig)
},
)
}
func (c *fluxClient) BootstrapGit(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig, cliConfig *config.CliConfig) error {
return c.Retry(
func() error {
return c.flux.BootstrapGit(ctx, cluster, fluxConfig, cliConfig)
},
)
}
func (c *fluxClient) Uninstall(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
return c.Retry(
func() error {
return c.flux.Uninstall(ctx, cluster, fluxConfig)
},
)
}
func (c *fluxClient) Reconcile(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error {
return c.Retry(
func() error {
return c.flux.Reconcile(ctx, cluster, fluxConfig)
},
)
}
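// ForceReconcile triggers an immediate Flux sync by stamping the GitRepository object with
// a fresh reconcile.fluxcd.io/requestedAt annotation. Note that the namespace string also
// serves as the object name, matching the default GitRepository created by flux bootstrap.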
func (c *fluxClient) ForceReconcile(ctx context.Context, cluster *types.Cluster, namespace string) error {
annotations := map[string]string{
"reconcile.fluxcd.io/requestedAt": strconv.FormatInt(time.Now().Unix(), 10),
}
return c.Retry(
func() error {
return c.kube.UpdateAnnotation(ctx, "gitrepositories", namespace, annotations, executables.WithOverwrite(), executables.WithCluster(cluster), executables.WithNamespace(namespace))
},
)
}
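// DisableResourceReconcile annotates the object with kustomize.toolkit.fluxcd.io/reconcile=disabled,
// which tells Flux to stop reconciling it.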
func (c *fluxClient) DisableResourceReconcile(ctx context.Context, cluster *types.Cluster, resourceType, objectName, namespace string) error {
annotations := map[string]string{
reconcileAnnotation: "disabled",
}
return c.Retry(
func() error {
return c.kube.UpdateAnnotation(ctx, resourceType, objectName, annotations, executables.WithOverwrite(), executables.WithCluster(cluster), executables.WithNamespace(namespace))
},
)
}
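// EnableResourceReconcile removes the kustomize.toolkit.fluxcd.io/reconcile annotation so
// that Flux resumes reconciling the object.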
func (c *fluxClient) EnableResourceReconcile(ctx context.Context, cluster *types.Cluster, resourceType, objectName, namespace string) error {
return c.Retry(
func() error {
return c.kube.RemoveAnnotation(ctx, resourceType, objectName, reconcileAnnotation, executables.WithOverwrite(), executables.WithCluster(cluster), executables.WithNamespace(namespace))
},
)
}
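// DeleteSystemSecret deletes the flux-system secret from the given namespace.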
func (c *fluxClient) DeleteSystemSecret(ctx context.Context, cluster *types.Cluster, namespace string) error {
return c.Retry(
func() error {
return c.kube.DeleteSecret(ctx, cluster, "flux-system", namespace)
},
)
}
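// GetCluster fetches the EKS-A Cluster object named in the cluster spec, retrying on
// transient failures.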
func (c *fluxClient) GetCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) (eksaCluster *v1alpha1.Cluster, err error) {
err = c.Retry(
func() error {
eksaCluster, err = c.kube.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name)
return err
},
)
return eksaCluster, err
}
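// Illustrative usage (the variable names below are hypothetical): every method on
// fluxClient retries up to maxRetries times with backOffPeriod between attempts, so
// transient failures are absorbed transparently.
//
//	c := newFluxClient(fluxExe, kubectl)
//	if err := c.BootstrapGithub(ctx, cluster, fluxConfig); err != nil {
//		// all retry attempts failed
//	}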
| 133 |
eks-anywhere | aws | Go | package flux
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/gitops/flux/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
type fluxClientTest struct {
*WithT
ctx context.Context
c *fluxClient
f *mocks.MockFluxClient
k *mocks.MockKubeClient
cluster *types.Cluster
fluxConfig *v1alpha1.FluxConfig
}
func newFluxClientTest(t *testing.T) *fluxClientTest {
ctrl := gomock.NewController(t)
f := mocks.NewMockFluxClient(ctrl)
k := mocks.NewMockKubeClient(ctrl)
c := newFluxClient(f, k)
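	// Zero out the retrier's backoff so the retry-exhaustion tests run instantly.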
c.Retrier = retrier.NewWithMaxRetries(maxRetries, 0)
return &fluxClientTest{
WithT: NewWithT(t),
ctx: context.Background(),
c: c,
f: f,
k: k,
cluster: &types.Cluster{},
fluxConfig: &v1alpha1.FluxConfig{},
}
}
func TestFluxClientBootstrapGithubSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.fluxConfig).Return(errors.New("error in bootstrap github")).Times(4)
tt.f.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.fluxConfig).Return(nil).Times(1)
tt.Expect(tt.c.BootstrapGithub(tt.ctx, tt.cluster, tt.fluxConfig)).To(Succeed(), "fluxClient.BootstrapGithub() should succeed with 5 tries")
}
func TestFluxClientBootstrapGithubError(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.fluxConfig).Return(errors.New("error in bootstrap github")).Times(5)
tt.f.EXPECT().BootstrapGithub(tt.ctx, tt.cluster, tt.fluxConfig).Return(nil).AnyTimes()
tt.Expect(tt.c.BootstrapGithub(tt.ctx, tt.cluster, tt.fluxConfig)).To(MatchError(ContainSubstring("error in bootstrap github")), "fluxClient.BootstrapGithub() should fail after 5 tries")
}
func TestFluxClientBootstrapGitSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().BootstrapGit(tt.ctx, tt.cluster, tt.fluxConfig, nil).Return(errors.New("error in bootstrap git")).Times(4)
tt.f.EXPECT().BootstrapGit(tt.ctx, tt.cluster, tt.fluxConfig, nil).Return(nil).Times(1)
tt.Expect(tt.c.BootstrapGit(tt.ctx, tt.cluster, tt.fluxConfig, nil)).To(Succeed(), "fluxClient.BootstrapGit() should succeed with 5 tries")
}
func TestFluxClientBootstrapGitError(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().BootstrapGit(tt.ctx, tt.cluster, tt.fluxConfig, nil).Return(errors.New("error in bootstrap git")).Times(5)
tt.f.EXPECT().BootstrapGit(tt.ctx, tt.cluster, tt.fluxConfig, nil).Return(nil).AnyTimes()
tt.Expect(tt.c.BootstrapGit(tt.ctx, tt.cluster, tt.fluxConfig, nil)).To(MatchError(ContainSubstring("error in bootstrap git")), "fluxClient.BootstrapGit() should fail after 5 tries")
}
func TestFluxClientUninstallSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().Uninstall(tt.ctx, tt.cluster, tt.fluxConfig).Return(errors.New("error in uninstall")).Times(4)
tt.f.EXPECT().Uninstall(tt.ctx, tt.cluster, tt.fluxConfig).Return(nil).Times(1)
tt.Expect(tt.c.Uninstall(tt.ctx, tt.cluster, tt.fluxConfig)).To(Succeed(), "fluxClient.Uninstall() should succeed with 5 tries")
}
func TestFluxClientUninstallError(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().Uninstall(tt.ctx, tt.cluster, tt.fluxConfig).Return(errors.New("error in uninstall")).Times(5)
tt.f.EXPECT().Uninstall(tt.ctx, tt.cluster, tt.fluxConfig).Return(nil).AnyTimes()
tt.Expect(tt.c.Uninstall(tt.ctx, tt.cluster, tt.fluxConfig)).To(MatchError(ContainSubstring("error in uninstall")), "fluxClient.Uninstall() should fail after 5 tries")
}
func TestFluxClientEnableResourceReconcileSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().RemoveAnnotation(tt.ctx, "cluster", "test-cluster", "kustomize.toolkit.fluxcd.io/reconcile", gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error in remove annotation")).Times(4)
tt.k.EXPECT().RemoveAnnotation(tt.ctx, "cluster", "test-cluster", "kustomize.toolkit.fluxcd.io/reconcile", gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1)
tt.Expect(tt.c.EnableResourceReconcile(tt.ctx, tt.cluster, "cluster", "test-cluster", "default")).To(Succeed(), "fluxClient.EnableResourceReconcile() should succeed with 5 tries")
}
func TestFluxClientEnableResourceReconcileError(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().RemoveAnnotation(tt.ctx, "cluster", "test-cluster", "kustomize.toolkit.fluxcd.io/reconcile", gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error in remove annotation")).Times(5)
tt.k.EXPECT().RemoveAnnotation(tt.ctx, "cluster", "test-cluster", "kustomize.toolkit.fluxcd.io/reconcile", gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
tt.Expect(tt.c.EnableResourceReconcile(tt.ctx, tt.cluster, "cluster", "test-cluster", "default")).To(MatchError(ContainSubstring("error in remove annotation")), "fluxClient.EnableResourceReconcile() should fail after 5 tries")
}
func TestFluxClientDisableResourceReconcileSuccess(t *testing.T) {
tt := newFluxClientTest(t)
annotations := map[string]string{
"kustomize.toolkit.fluxcd.io/reconcile": "disabled",
}
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "cluster", "test-cluster", annotations, gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error in add annotation")).Times(4)
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "cluster", "test-cluster", annotations, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1)
tt.Expect(tt.c.DisableResourceReconcile(tt.ctx, tt.cluster, "cluster", "test-cluster", "default")).To(Succeed(), "fluxClient.DisableResourceReconcile() should succeed with 5 tries")
}
func TestFluxClientDisableResourceReconcileError(t *testing.T) {
tt := newFluxClientTest(t)
annotations := map[string]string{
"kustomize.toolkit.fluxcd.io/reconcile": "disabled",
}
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "cluster", "test-cluster", annotations, gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error in add annotation")).Times(5)
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "cluster", "test-cluster", annotations, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
tt.Expect(tt.c.DisableResourceReconcile(tt.ctx, tt.cluster, "cluster", "test-cluster", "default")).To(MatchError(ContainSubstring("error in add annotation")), "fluxClient.DisableResourceReconcile() should fail after 5 tries")
}
func TestFluxClientReconcileSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().Reconcile(tt.ctx, tt.cluster, tt.fluxConfig).Return(errors.New("error in reconcile")).Times(4)
tt.f.EXPECT().Reconcile(tt.ctx, tt.cluster, tt.fluxConfig).Return(nil).Times(1)
tt.Expect(tt.c.Reconcile(tt.ctx, tt.cluster, tt.fluxConfig)).To(Succeed(), "fluxClient.Reconcile() should succeed with 5 tries")
}
func TestFluxClientReconcileError(t *testing.T) {
tt := newFluxClientTest(t)
tt.f.EXPECT().Reconcile(tt.ctx, tt.cluster, tt.fluxConfig).Return(errors.New("error in reconcile")).Times(5)
tt.f.EXPECT().Reconcile(tt.ctx, tt.cluster, tt.fluxConfig).Return(nil).AnyTimes()
tt.Expect(tt.c.Reconcile(tt.ctx, tt.cluster, tt.fluxConfig)).To(MatchError(ContainSubstring("error in reconcile")), "fluxClient.Reconcile() should fail after 5 tries")
}
func TestFluxClientForceReconcileSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "gitrepositories", "flux-system", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error in force reconcile")).Times(4)
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "gitrepositories", "flux-system", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1)
tt.Expect(tt.c.ForceReconcile(tt.ctx, tt.cluster, "flux-system")).To(Succeed(), "fluxClient.ForceReconcile() should succeed with 5 tries")
}
func TestFluxClientForceReconcileError(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "gitrepositories", "flux-system", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error in force reconcile")).Times(5)
tt.k.EXPECT().UpdateAnnotation(tt.ctx, "gitrepositories", "flux-system", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
tt.Expect(tt.c.ForceReconcile(tt.ctx, tt.cluster, "flux-system")).To(MatchError(ContainSubstring("error in force reconcile")), "fluxClient.ForceReconcile() should fail after 5 tries")
}
func TestFluxClientDeleteSystemSecretSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().DeleteSecret(tt.ctx, tt.cluster, "flux-system", "custom-namespace").Return(errors.New("error in delete secret")).Times(4)
tt.k.EXPECT().DeleteSecret(tt.ctx, tt.cluster, "flux-system", "custom-namespace").Return(nil).Times(1)
tt.Expect(tt.c.DeleteSystemSecret(tt.ctx, tt.cluster, "custom-namespace")).To(Succeed(), "fluxClient.DeleteSystemSecret() should succeed with 5 tries")
}
func TestFluxClientDeleteSystemSecretError(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().DeleteSecret(tt.ctx, tt.cluster, "flux-system", "custom-namespace").Return(errors.New("error in delete secret")).Times(5)
tt.k.EXPECT().DeleteSecret(tt.ctx, tt.cluster, "flux-system", "custom-namespace").Return(nil).AnyTimes()
tt.Expect(tt.c.DeleteSystemSecret(tt.ctx, tt.cluster, "custom-namespace")).To(MatchError(ContainSubstring("error in delete secret")), "fluxClient.DeleteSystemSecret() should fail after 5 tries")
}
func TestFluxClientGetClusterSuccess(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, "fluxTestCluster").Return(nil, errors.New("error in get eksa cluster")).Times(4)
tt.k.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, "fluxTestCluster").Return(nil, nil).Times(1)
_, err := tt.c.GetCluster(tt.ctx, tt.cluster, test.NewClusterSpec())
tt.Expect(err).To(Succeed(), "fluxClient.GetCluster() should succeed with 5 tries")
}
func TestFluxClientGetClusterError(t *testing.T) {
tt := newFluxClientTest(t)
tt.k.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, "fluxTestCluster").Return(nil, errors.New("error in get eksa cluster")).Times(5)
tt.k.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, "fluxTestCluster").Return(nil, nil).AnyTimes()
_, err := tt.c.GetCluster(tt.ctx, tt.cluster, test.NewClusterSpec())
tt.Expect(err).To(MatchError(ContainSubstring("error in get eksa cluster")), "fluxClient.GetCluster() should fail after 5 tries")
}
| 198 |
eks-anywhere | aws | Go | package flux
import (
"context"
"errors"
"fmt"
"path"
"path/filepath"
"strings"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/validations"
)
// fluxForCluster bundles the Flux struct with a specific clusterSpec, so that all git and
// file-write operations for that clusterSpec can be performed by its methods.
type fluxForCluster struct {
*Flux
clusterSpec *cluster.Spec
datacenterConfig providers.DatacenterConfig
machineConfigs []providers.MachineConfig
}
func newFluxForCluster(flux *Flux, clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) *fluxForCluster {
return &fluxForCluster{
Flux: flux,
clusterSpec: clusterSpec,
datacenterConfig: datacenterConfig,
machineConfigs: machineConfigs,
}
}
// commitFluxAndClusterConfigToGit commits the cluster configuration file to the flux-managed git repository.
// If the remote repository does not exist, it will initialize a local repository and push it to the configured remote.
// It will generate the kustomization file and marshal the cluster configuration file to the required locations in the repo.
// These will later be used by Flux and our controllers to reconcile the repository contents and the cluster configuration.
func (fc *fluxForCluster) commitFluxAndClusterConfigToGit(ctx context.Context) error {
logger.Info("Adding cluster configuration files to Git")
config := fc.clusterSpec.FluxConfig
if err := fc.validateLocalConfigPathDoesNotExist(); err != nil {
return err
}
g := NewFileGenerator()
if err := g.Init(fc.writer, fc.eksaSystemDir(), fc.fluxSystemDir()); err != nil {
return err
}
if err := g.WriteEksaFiles(fc.clusterSpec, fc.datacenterConfig, fc.machineConfigs); err != nil {
return fmt.Errorf("writing eks-a config files: %v", err)
}
if fc.clusterSpec.Cluster.IsSelfManaged() {
if err := g.WriteFluxSystemFiles(fc.clusterSpec); err != nil {
return fmt.Errorf("writing flux system files: %v", err)
}
}
p := path.Dir(config.Spec.ClusterConfigPath)
if err := fc.gitClient.Add(p); err != nil {
return fmt.Errorf("adding %s to git: %v", p, err)
}
if err := fc.Flux.pushToRemoteRepo(ctx, p, initialClusterconfigCommitMessage); err != nil {
return err
}
logger.V(3).Info("Finished pushing cluster config and flux custom manifest files to git")
return nil
}
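// syncGitRepo ensures a usable local checkout: it clones the remote repository if the local
// directory is not yet a git repository, and otherwise switches the existing checkout to the
// branch specified in the config.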
func (fc *fluxForCluster) syncGitRepo(ctx context.Context) error {
if !validations.FileExists(path.Join(fc.writer.Dir(), ".git")) {
if err := fc.clone(ctx); err != nil {
return fmt.Errorf("cloning git repo: %v", err)
}
} else {
// Make sure the local git repo is on the branch specified in config and up-to-date with the remote
if err := fc.gitClient.Branch(fc.branch()); err != nil {
return fmt.Errorf("switching to git branch %s: %v", fc.branch(), err)
}
}
return nil
}
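// initializeProviderRepositoryIfNotExists returns the remote repository if it already
// exists. If it has to create the repository, it also initializes a local one and returns a
// nil repository so the caller knows there is nothing to clone yet. For the generic git
// provider, the repository is expected to be pre-initialized by the user, so an empty
// repository handle is returned and the caller proceeds to clone.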
func (fc *fluxForCluster) initializeProviderRepositoryIfNotExists(ctx context.Context) (*git.Repository, error) {
	// With the generic git provider, the repository should be pre-initialized by the user.
if fc.clusterSpec.FluxConfig.Spec.Git != nil {
return &git.Repository{}, nil
}
r, err := fc.gitClient.GetRepo(ctx)
if err != nil {
return nil, fmt.Errorf("describing repo: %v", err)
}
if r != nil {
return r, nil
}
if err = fc.createRemoteRepository(ctx); err != nil {
return nil, err
}
if err = fc.initializeLocalRepository(); err != nil {
return nil, err
}
return nil, nil
}
// setupRepository sets up the repository that will house the GitOps configuration for the cluster.
// If the repository exists and is not empty, it will be cloned.
// If the repository exists but is empty, it will be initialized locally, as a bare repository cannot be cloned.
// If the repository does not exist, it will be created and then initialized locally.
func (fc *fluxForCluster) setupRepository(ctx context.Context) (err error) {
r, err := fc.initializeProviderRepositoryIfNotExists(ctx)
if err != nil {
return err
}
if r != nil {
err = fc.clone(ctx)
}
var repoEmptyErr *git.RepositoryIsEmptyError
if errors.As(err, &repoEmptyErr) {
logger.V(3).Info("remote repository is empty and can't be cloned; will initialize locally")
if initErr := fc.initializeLocalRepository(); initErr != nil {
return fmt.Errorf("initializing local repository: %v", initErr)
}
return nil
}
return err
}
func (fc *fluxForCluster) clone(ctx context.Context) error {
logger.V(3).Info("Cloning remote repository")
if err := fc.gitClient.Clone(ctx); err != nil {
return err
}
logger.V(3).Info("Creating a new branch")
return fc.gitClient.Branch(fc.branch())
}
// createRemoteRepository will create a repository in the remote git provider with the user-provided configuration.
func (fc *fluxForCluster) createRemoteRepository(ctx context.Context) error {
logger.V(3).Info("Remote Github repo does not exist; will create and initialize", "repo", fc.repository(), "owner", fc.owner())
opts := git.CreateRepoOpts{
Name: fc.repository(),
Owner: fc.owner(),
Description: "EKS-A cluster configuration repository",
Personal: fc.personal(),
Privacy: true,
}
logger.V(4).Info("Creating remote Github repo", "options", opts)
if err := fc.gitClient.CreateRepo(ctx, opts); err != nil {
return fmt.Errorf("creating repo: %v", err)
}
return nil
}
// initializeLocalRepository initializes a git repository in the local repository directory,
// creates an initial commit, and then switches to the branch specified in the GitOps configuration.
func (fc *fluxForCluster) initializeLocalRepository() error {
if err := fc.gitClient.Init(); err != nil {
return fmt.Errorf("initializing repository: %v", err)
}
// git requires at least one commit in the repo to branch from
if err := fc.gitClient.Commit("initializing repository"); err != nil {
return fmt.Errorf("committing to repository: %v", err)
}
if err := fc.gitClient.Branch(fc.branch()); err != nil {
return fmt.Errorf("creating branch: %v", err)
}
return nil
}
// validateLocalConfigPathDoesNotExist returns an error if the cluster configuration file already exists.
// This is done so that we avoid clobbering existing cluster configurations in the user-provided git repository.
func (fc *fluxForCluster) validateLocalConfigPathDoesNotExist() error {
if fc.clusterSpec.Cluster.IsSelfManaged() {
p := path.Join(fc.writer.Dir(), fc.path())
if validations.FileExists(p) {
return fmt.Errorf("a cluster configuration file already exists at path %s", p)
}
}
return nil
}
func (fc *fluxForCluster) validateRemoteConfigPathDoesNotExist(ctx context.Context) error {
if !fc.clusterSpec.Cluster.IsSelfManaged() || fc.gitClient == nil {
return nil
}
exists, err := fc.gitClient.PathExists(ctx, fc.owner(), fc.repository(), fc.branch(), fc.path())
if err != nil {
return fmt.Errorf("failed validating remote flux config path: %v", err)
}
if exists {
return fmt.Errorf("flux path %s already exists in remote repository", fc.path())
}
return nil
}
func (fc *fluxForCluster) namespace() string {
return fc.clusterSpec.FluxConfig.Spec.SystemNamespace
}
func (fc *fluxForCluster) repository() string {
if fc.clusterSpec.FluxConfig.Spec.Github != nil {
return fc.clusterSpec.FluxConfig.Spec.Github.Repository
}
if fc.clusterSpec.FluxConfig.Spec.Git != nil {
r := fc.clusterSpec.FluxConfig.Spec.Git.RepositoryUrl
return path.Base(strings.TrimSuffix(r, filepath.Ext(r)))
}
return ""
}
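// For example (illustrative): a Git RepositoryUrl of
// "https://github.com/owner/fleet-config.git" yields the repository name
// "fleet-config": filepath.Ext returns ".git", strings.TrimSuffix drops it,
// and path.Base keeps the final path element.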
func (fc *fluxForCluster) owner() string {
if fc.clusterSpec.FluxConfig.Spec.Github != nil {
return fc.clusterSpec.FluxConfig.Spec.Github.Owner
}
return ""
}
func (fc *fluxForCluster) branch() string {
return fc.clusterSpec.FluxConfig.Spec.Branch
}
func (fc *fluxForCluster) personal() bool {
if fc.clusterSpec.FluxConfig.Spec.Github != nil {
return fc.clusterSpec.FluxConfig.Spec.Github.Personal
}
return false
}
func (fc *fluxForCluster) path() string {
return fc.clusterSpec.FluxConfig.Spec.ClusterConfigPath
}
func (fc *fluxForCluster) eksaSystemDir() string {
return path.Join(fc.path(), fc.clusterSpec.Cluster.GetName(), eksaSystemDirName)
}
func (fc *fluxForCluster) fluxSystemDir() string {
return path.Join(fc.path(), fc.namespace())
}
| 263 |
eks-anywhere | aws | Go | package flux
import (
_ "embed"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermarshaller"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/templater"
)
const (
eksaSystemDirName = "eksa-system"
kustomizeFileName = "kustomization.yaml"
clusterConfigFileName = "eksa-cluster.yaml"
fluxSyncFileName = "gotk-sync.yaml"
fluxPatchFileName = "gotk-patches.yaml"
)
//go:embed manifests/eksa-system/kustomization.yaml
var eksaKustomizeContent string
//go:embed manifests/flux-system/kustomization.yaml
var fluxKustomizeContent string
//go:embed manifests/flux-system/gotk-sync.yaml
var fluxSyncContent string
//go:embed manifests/flux-system/gotk-patches.yaml
var fluxPatchContent string
type Templater interface {
WriteToFile(templateContent string, data interface{}, fileName string, f ...filewriter.FileOptionsFunc) (filePath string, err error)
}
type FileGenerator struct {
fluxWriter, eksaWriter filewriter.FileWriter
fluxTemplater, eksaTemplater Templater
}
func NewFileGenerator() *FileGenerator {
return &FileGenerator{}
}
// NewFileGeneratorWithWriterTemplater takes flux and eksa writers and templaters to build the generator.
// This is only for testing.
func NewFileGeneratorWithWriterTemplater(fluxWriter, eksaWriter filewriter.FileWriter, fluxTemplater, eksaTemplater Templater) *FileGenerator {
return &FileGenerator{
fluxWriter: fluxWriter,
eksaWriter: eksaWriter,
fluxTemplater: fluxTemplater,
eksaTemplater: eksaTemplater,
}
}
func (g *FileGenerator) Init(writer filewriter.FileWriter, eksaSystemDir, fluxSystemDir string) error {
eksaWriter, err := writer.WithDir(eksaSystemDir)
if err != nil {
return fmt.Errorf("initializing eks-a system writer: %v", err)
}
eksaWriter.CleanUpTemp()
fluxWriter, err := writer.WithDir(fluxSystemDir)
if err != nil {
return fmt.Errorf("initializing flux system writer: %v", err)
}
fluxWriter.CleanUpTemp()
g.eksaWriter = eksaWriter
g.fluxWriter = fluxWriter
g.eksaTemplater = templater.New(eksaWriter)
g.fluxTemplater = templater.New(fluxWriter)
return nil
}
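// Illustrative usage sketch (not part of the original source): a caller
// initializes the generator against per-directory writers and then emits both
// file sets. The directory arguments here are assumptions for the example.
//
//	g := NewFileGenerator()
//	if err := g.Init(writer, "clusters/dev/eksa-system", "clusters/dev/flux-system"); err != nil {
//		return err
//	}
//	if err := g.WriteEksaFiles(clusterSpec, datacenterConfig, machineConfigs); err != nil {
//		return err
//	}
//	if err := g.WriteFluxSystemFiles(clusterSpec); err != nil {
//		return err
//	}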
func (g *FileGenerator) WriteEksaFiles(clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error {
if datacenterConfig == nil && machineConfigs == nil {
return nil
}
if err := g.WriteClusterConfig(clusterSpec, datacenterConfig, machineConfigs); err != nil {
return err
}
if err := g.WriteEksaKustomization(); err != nil {
return err
}
return nil
}
func (g *FileGenerator) WriteFluxSystemFiles(clusterSpec *cluster.Spec) error {
if err := g.WriteFluxKustomization(clusterSpec); err != nil {
return err
}
if err := g.WriteFluxSync(); err != nil {
return err
}
if err := g.WriteFluxPatch(clusterSpec); err != nil {
return err
}
return nil
}
func (g *FileGenerator) WriteClusterConfig(clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error {
specs, err := clustermarshaller.MarshalClusterSpec(clusterSpec, datacenterConfig, machineConfigs)
if err != nil {
return err
}
if filePath, err := g.eksaWriter.Write(clusterConfigFileName, specs, filewriter.PersistentFile); err != nil {
return fmt.Errorf("writing eks-a cluster config file into %s: %v", filePath, err)
}
return nil
}
func (g *FileGenerator) WriteEksaKustomization() error {
values := map[string]string{
"ConfigFileName": clusterConfigFileName,
}
if path, err := g.eksaTemplater.WriteToFile(eksaKustomizeContent, values, kustomizeFileName, filewriter.PersistentFile); err != nil {
return fmt.Errorf("writing eks-a kustomization manifest file into %s: %v", path, err)
}
return nil
}
func (g *FileGenerator) WriteFluxKustomization(clusterSpec *cluster.Spec) error {
values := map[string]string{
"Namespace": clusterSpec.FluxConfig.Spec.SystemNamespace,
}
if path, err := g.fluxTemplater.WriteToFile(fluxKustomizeContent, values, kustomizeFileName, filewriter.PersistentFile); err != nil {
return fmt.Errorf("creating flux-system kustomization manifest file into %s: %v", path, err)
}
return nil
}
func (g *FileGenerator) WriteFluxSync() error {
if path, err := g.fluxTemplater.WriteToFile(fluxSyncContent, nil, fluxSyncFileName, filewriter.PersistentFile); err != nil {
return fmt.Errorf("creating flux-system sync manifest file into %s: %v", path, err)
}
return nil
}
func (g *FileGenerator) WriteFluxPatch(clusterSpec *cluster.Spec) error {
values := map[string]string{
"Namespace": clusterSpec.FluxConfig.Spec.SystemNamespace,
"SourceControllerImage": clusterSpec.VersionsBundle.Flux.SourceController.VersionedImage(),
"KustomizeControllerImage": clusterSpec.VersionsBundle.Flux.KustomizeController.VersionedImage(),
"HelmControllerImage": clusterSpec.VersionsBundle.Flux.HelmController.VersionedImage(),
"NotificationControllerImage": clusterSpec.VersionsBundle.Flux.NotificationController.VersionedImage(),
}
if path, err := g.fluxTemplater.WriteToFile(fluxPatchContent, values, fluxPatchFileName, filewriter.PersistentFile); err != nil {
return fmt.Errorf("creating flux-system patch manifest file into %s: %v", path, err)
}
return nil
}
| 165 |
eks-anywhere | aws | Go | package flux_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
writerMocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/gitops/flux"
fluxMocks "github.com/aws/eks-anywhere/pkg/gitops/flux/mocks"
"github.com/aws/eks-anywhere/pkg/providers"
)
var wantConfig = `apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Cluster
metadata:
name: test-cluster
namespace: default
spec:
clusterNetwork:
cniConfig: {}
pods: {}
services: {}
controlPlaneConfiguration: {}
datacenterRef: {}
gitOpsRef:
kind: FluxConfig
name: test-gitops
kubernetesVersion: "1.19"
managementCluster:
name: test-cluster
---
kind: VSphereDatacenterConfig
metadata:
name: test-cluster
namespace: default
spec:
datacenter: SDDC-Datacenter
insecure: false
network: ""
server: ""
thumbprint: ""
---
kind: VSphereMachineConfig
metadata:
name: test-cluster
namespace: default
spec:
datastore: ""
folder: ""
memoryMiB: 0
numCPUs: 0
osFamily: ""
resourcePool: ""
template: /SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.19.6
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: FluxConfig
metadata:
name: test-gitops
namespace: default
spec:
branch: testBranch
clusterConfigPath: clusters/test-cluster
github:
owner: mFolwer
personal: true
repository: testRepo
systemNamespace: flux-system
---
`
var wantEksaKustomization = `apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- {{.ConfigFileName}}`
var wantFluxKustomization = `apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: {{.Namespace}}
resources:
- gotk-components.yaml
- gotk-sync.yaml
patchesStrategicMerge:
- gotk-patches.yaml`
var wantFluxPatches = `apiVersion: apps/v1
kind: Deployment
metadata:
name: source-controller
namespace: {{.Namespace}}
spec:
template:
spec:
containers:
- image: {{.SourceControllerImage}}
name: manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kustomize-controller
namespace: {{.Namespace}}
spec:
template:
spec:
containers:
- image: {{.KustomizeControllerImage}}
name: manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: helm-controller
namespace: {{.Namespace}}
spec:
template:
spec:
containers:
- image: {{.HelmControllerImage}}
name: manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: notification-controller
namespace: {{.Namespace}}
spec:
template:
spec:
containers:
- image: {{.NotificationControllerImage}}
name: manager`
var wantPatchesValues = map[string]string{
"Namespace": "flux-system",
"SourceControllerImage": "public.ecr.aws/l0g8r8j6/fluxcd/source-controller:v0.12.1-8539f509df046a4f567d2182dde824b957136599",
"KustomizeControllerImage": "public.ecr.aws/l0g8r8j6/fluxcd/kustomize-controller:v0.11.1-d82011942ec8a447ba89a70ff9a84bf7b9579492",
"HelmControllerImage": "public.ecr.aws/l0g8r8j6/fluxcd/helm-controller:v0.10.0-d82011942ec8a447ba89a70ff9a84bf7b9579492",
"NotificationControllerImage": "public.ecr.aws/l0g8r8j6/fluxcd/notification-controller:v0.13.0-d82011942ec8a447ba89a70ff9a84bf7b9579492",
}
type fileGeneratorTest struct {
*WithT
ctx context.Context
g *flux.FileGenerator
w *writerMocks.MockFileWriter
t *fluxMocks.MockTemplater
clusterSpec *cluster.Spec
datacenterConfig providers.DatacenterConfig
machineConfigs []providers.MachineConfig
}
func newFileGeneratorTest(t *testing.T) *fileGeneratorTest {
ctrl := gomock.NewController(t)
writer := writerMocks.NewMockFileWriter(ctrl)
templater := fluxMocks.NewMockTemplater(ctrl)
clusterName := "test-cluster"
return &fileGeneratorTest{
WithT: NewWithT(t),
ctx: context.Background(),
g: flux.NewFileGeneratorWithWriterTemplater(writer, writer, templater, templater),
w: writer,
t: templater,
clusterSpec: newClusterSpec(t, v1alpha1.NewCluster(clusterName), ""),
datacenterConfig: datacenterConfig(clusterName),
machineConfigs: []providers.MachineConfig{machineConfig(clusterName)},
}
}
func TestFileGeneratorInitSuccess(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.w.EXPECT().WithDir("dir1").Return(tt.w, nil)
tt.w.EXPECT().WithDir("dir2").Return(tt.w, nil)
tt.w.EXPECT().CleanUpTemp().Times(2)
tt.Expect(tt.g.Init(tt.w, "dir1", "dir2")).To(Succeed())
}
func TestFileGeneratorInitEksaWriterError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.w.EXPECT().WithDir("dir1").Return(nil, errors.New("error in writer dir1"))
tt.Expect(tt.g.Init(tt.w, "dir1", "dir2")).To(MatchError(ContainSubstring("error in writer dir1")))
}
func TestFileGeneratorInitFluxWriterError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.w.EXPECT().WithDir("dir1").Return(tt.w, nil)
tt.w.EXPECT().CleanUpTemp()
tt.w.EXPECT().WithDir("dir2").Return(nil, errors.New("error in writer dir2"))
tt.Expect(tt.g.Init(tt.w, "dir1", "dir2")).To(MatchError(ContainSubstring("error in writer dir2")))
}
func TestFileGeneratorWriteEksaFilesSuccess(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.w.EXPECT().Write("eksa-cluster.yaml", []byte(wantConfig), gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile(wantEksaKustomization, map[string]string{"ConfigFileName": "eksa-cluster.yaml"}, "kustomization.yaml", gomock.Any()).Return("", nil)
tt.Expect(tt.g.WriteEksaFiles(tt.clusterSpec, tt.datacenterConfig, tt.machineConfigs)).To(Succeed())
}
func TestFileGeneratorWriteEksaFilesSkip(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.Expect(tt.g.WriteEksaFiles(tt.clusterSpec, nil, nil)).To(Succeed())
}
func TestFileGeneratorWriteEksaFilesWriteError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.w.EXPECT().Write("eksa-cluster.yaml", []byte(wantConfig), gomock.Any()).Return("", errors.New("error in write"))
tt.Expect(tt.g.WriteEksaFiles(tt.clusterSpec, tt.datacenterConfig, tt.machineConfigs)).To(MatchError(ContainSubstring("error in write")))
}
func TestFileGeneratorWriteEksaFilesWriteToFileError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.w.EXPECT().Write("eksa-cluster.yaml", []byte(wantConfig), gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile(wantEksaKustomization, map[string]string{"ConfigFileName": "eksa-cluster.yaml"}, "kustomization.yaml", gomock.Any()).Return("", errors.New("error in write to file"))
tt.Expect(tt.g.WriteEksaFiles(tt.clusterSpec, tt.datacenterConfig, tt.machineConfigs)).To(MatchError(ContainSubstring("error in write to file")))
}
func TestFileGeneratorWriteFluxSystemFilesSuccess(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.t.EXPECT().WriteToFile(wantFluxKustomization, map[string]string{"Namespace": "flux-system"}, "kustomization.yaml", gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile("", nil, "gotk-sync.yaml", gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile(wantFluxPatches, wantPatchesValues, "gotk-patches.yaml", gomock.Any()).Return("", nil)
tt.Expect(tt.g.WriteFluxSystemFiles(tt.clusterSpec)).To(Succeed())
}
func TestFileGeneratorWriteFluxSystemFilesWriteFluxKustomizationError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.t.EXPECT().WriteToFile(wantFluxKustomization, map[string]string{"Namespace": "flux-system"}, "kustomization.yaml", gomock.Any()).Return("", errors.New("error in write kustomization"))
tt.Expect(tt.g.WriteFluxSystemFiles(tt.clusterSpec)).To(MatchError(ContainSubstring("error in write kustomization")))
}
func TestFileGeneratorWriteFluxSystemFilesWriteFluxSyncError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.t.EXPECT().WriteToFile(wantFluxKustomization, map[string]string{"Namespace": "flux-system"}, "kustomization.yaml", gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile("", nil, "gotk-sync.yaml", gomock.Any()).Return("", errors.New("error in write sync"))
tt.Expect(tt.g.WriteFluxSystemFiles(tt.clusterSpec)).To(MatchError(ContainSubstring("error in write sync")))
}
func TestFileGeneratorWriteFluxSystemFilesWriteFluxPatchesError(t *testing.T) {
tt := newFileGeneratorTest(t)
tt.t.EXPECT().WriteToFile(wantFluxKustomization, map[string]string{"Namespace": "flux-system"}, "kustomization.yaml", gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile("", nil, "gotk-sync.yaml", gomock.Any()).Return("", nil)
tt.t.EXPECT().WriteToFile(wantFluxPatches, wantPatchesValues, "gotk-patches.yaml", gomock.Any()).Return("", errors.New("error in write patches"))
tt.Expect(tt.g.WriteFluxSystemFiles(tt.clusterSpec)).To(MatchError(ContainSubstring("error in write patches")))
}
| 276 |
eks-anywhere | aws | Go | package flux
import (
"context"
"fmt"
"path"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/git"
gitFactory "github.com/aws/eks-anywhere/pkg/git/factory"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
const (
defaultRemote = "origin"
initialClusterconfigCommitMessage = "Initial commit of cluster configuration; generated by EKS-A CLI"
updateClusterconfigCommitMessage = "Update commit of cluster configuration; generated by EKS-A CLI"
deleteClusterconfigCommitMessage = "Delete commit of cluster configuration; generated by EKS-A CLI"
)
type GitOpsFluxClient interface {
BootstrapGithub(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error
BootstrapGit(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig, cliConfig *config.CliConfig) error
Uninstall(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error
GetCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) (eksaCluster *v1alpha1.Cluster, err error)
DisableResourceReconcile(ctx context.Context, cluster *types.Cluster, resourceType, objectName, namespace string) error
EnableResourceReconcile(ctx context.Context, cluster *types.Cluster, resourceType, objectName, namespace string) error
Reconcile(ctx context.Context, cluster *types.Cluster, fluxConfig *v1alpha1.FluxConfig) error
ForceReconcile(ctx context.Context, cluster *types.Cluster, namespace string) error
DeleteSystemSecret(ctx context.Context, cluster *types.Cluster, namespace string) error
}
type GitClient interface {
GetRepo(ctx context.Context) (repo *git.Repository, err error)
CreateRepo(ctx context.Context, opts git.CreateRepoOpts) error
Clone(ctx context.Context) error
Push(ctx context.Context) error
Pull(ctx context.Context, branch string) error
PathExists(ctx context.Context, owner, repo, branch, path string) (exists bool, err error)
Add(filename string) error
Remove(filename string) error
Commit(message string) error
Branch(name string) error
Init() error
}
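// Illustrative summary (not part of the original source) of the write path this
// package drives through GitClient:
//
//	GetRepo -> (CreateRepo) -> Clone or Init -> Branch -> Add -> Commit -> Push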
type Flux struct {
fluxClient GitOpsFluxClient
gitClient GitClient
writer filewriter.FileWriter
cliConfig *config.CliConfig
}
func NewFlux(fluxClient FluxClient, kubeClient KubeClient, gitTools *gitFactory.GitTools, cliConfig *config.CliConfig) *Flux {
var w filewriter.FileWriter
if gitTools != nil {
w = gitTools.Writer
}
return &Flux{
fluxClient: newFluxClient(fluxClient, kubeClient),
gitClient: newGitClient(gitTools),
writer: w,
cliConfig: cliConfig,
}
}
func NewFluxFromGitOpsFluxClient(fluxClient GitOpsFluxClient, gitClient GitClient, writer filewriter.FileWriter, cliConfig *config.CliConfig) *Flux {
return &Flux{
fluxClient: fluxClient,
gitClient: gitClient,
writer: writer,
cliConfig: cliConfig,
}
}
func (f *Flux) InstallGitOps(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error {
if f.shouldSkipFlux() {
logger.Info("GitOps field not specified, bootstrap flux skipped")
return nil
}
fc := newFluxForCluster(f, clusterSpec, datacenterConfig, machineConfigs)
if err := fc.setupRepository(ctx); err != nil {
return err
}
if err := fc.commitFluxAndClusterConfigToGit(ctx); err != nil {
return err
}
if err := f.Bootstrap(ctx, cluster, clusterSpec); err != nil {
return err
}
logger.V(4).Info("pulling from remote after Flux Bootstrap to ensure configuration files in local git repository are in sync",
"remote", defaultRemote, "branch", fc.branch())
if err := f.gitClient.Pull(ctx, fc.branch()); err != nil {
		logger.Error(err, "error when pulling from remote repository after Flux Bootstrap; ensure local repository is up-to-date with remote (git pull)",
			"remote", defaultRemote, "branch", fc.branch())
}
return nil
}
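// Bootstrap installs the Flux components for whichever git provider is
// configured: BootstrapGithub and BootstrapGit each no-op unless their
// FluxConfig section is set, so at most one performs work. On failure the
// partially installed components are best-effort uninstalled before the error
// is returned. (Doc comment added in this edit; summarized from the body below.)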
func (f *Flux) Bootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if err := f.BootstrapGithub(ctx, cluster, clusterSpec); err != nil {
_ = f.Uninstall(ctx, cluster, clusterSpec)
return fmt.Errorf("installing GitHub gitops: %v", err)
}
if err := f.BootstrapGit(ctx, cluster, clusterSpec); err != nil {
_ = f.Uninstall(ctx, cluster, clusterSpec)
return fmt.Errorf("installing generic git gitops: %v", err)
}
return nil
}
func (f *Flux) BootstrapGithub(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if cluster.ExistingManagement || clusterSpec.FluxConfig.Spec.Github == nil {
return nil
}
return f.fluxClient.BootstrapGithub(ctx, cluster, clusterSpec.FluxConfig)
}
func (f *Flux) BootstrapGit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if cluster.ExistingManagement || clusterSpec.FluxConfig.Spec.Git == nil {
return nil
}
return f.fluxClient.BootstrapGit(ctx, cluster, clusterSpec.FluxConfig, f.cliConfig)
}
func (f *Flux) Uninstall(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if err := f.fluxClient.Uninstall(ctx, cluster, clusterSpec.FluxConfig); err != nil {
logger.Info("Could not uninstall flux components", "error", err)
return err
}
return nil
}
func (f *Flux) PauseClusterResourcesReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
if f.shouldSkipFlux() {
logger.V(4).Info("GitOps field not specified, pause cluster resources reconcile skipped")
return nil
}
logger.V(3).Info("Pause Flux EKS-A resources reconcile")
if err := f.fluxClient.DisableResourceReconcile(ctx, cluster, clusterSpec.Cluster.ResourceType(), clusterSpec.Cluster.Name, clusterSpec.Cluster.Namespace); err != nil {
return fmt.Errorf("disable resource %s %s from Flux reconcile: %v", clusterSpec.Cluster.ResourceType(), clusterSpec.Cluster.Name, err)
}
if err := f.fluxClient.DisableResourceReconcile(ctx, cluster, provider.DatacenterResourceType(), clusterSpec.Cluster.Spec.DatacenterRef.Name, clusterSpec.Cluster.Namespace); err != nil {
return fmt.Errorf("disable resource %s %s from Flux reconcile: %v", provider.DatacenterResourceType(), clusterSpec.Cluster.Spec.DatacenterRef.Name, err)
}
if provider.MachineResourceType() != "" {
for _, machineConfigRef := range clusterSpec.Cluster.MachineConfigRefs() {
if err := f.fluxClient.DisableResourceReconcile(ctx, cluster, provider.MachineResourceType(), machineConfigRef.Name, clusterSpec.Cluster.Namespace); err != nil {
return fmt.Errorf("disable resource %s %s from Flux reconcile: %v", provider.MachineResourceType(), machineConfigRef.Name, err)
}
}
}
return nil
}
func (f *Flux) ResumeClusterResourcesReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
if f.shouldSkipFlux() {
logger.V(4).Info("GitOps field not specified, resume cluster resources reconcile skipped")
return nil
}
logger.V(3).Info("Resume Flux EKS-A resources reconcile")
if err := f.fluxClient.EnableResourceReconcile(ctx, cluster, clusterSpec.Cluster.ResourceType(), clusterSpec.Cluster.Name, clusterSpec.Cluster.Namespace); err != nil {
return fmt.Errorf("enable resource %s %s from Flux reconcile: %v", clusterSpec.Cluster.ResourceType(), clusterSpec.Cluster.Name, err)
}
if err := f.fluxClient.EnableResourceReconcile(ctx, cluster, provider.DatacenterResourceType(), clusterSpec.Cluster.Spec.DatacenterRef.Name, clusterSpec.Cluster.Namespace); err != nil {
return fmt.Errorf("enable resource %s %s from Flux reconcile: %v", provider.DatacenterResourceType(), clusterSpec.Cluster.Spec.DatacenterRef.Name, err)
}
if provider.MachineResourceType() != "" {
for _, machineConfigRef := range clusterSpec.Cluster.MachineConfigRefs() {
if err := f.fluxClient.EnableResourceReconcile(ctx, cluster, provider.MachineResourceType(), machineConfigRef.Name, clusterSpec.Cluster.Namespace); err != nil {
return fmt.Errorf("enable resource %s %s from Flux reconcile: %v", provider.MachineResourceType(), machineConfigRef.Name, err)
}
}
}
return nil
}
func (f *Flux) ForceReconcileGitRepo(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if f.shouldSkipFlux() {
logger.Info("GitOps not configured, force reconcile flux git repo skipped")
return nil
}
return f.fluxClient.ForceReconcile(ctx, cluster, clusterSpec.FluxConfig.Spec.SystemNamespace)
}
func (f *Flux) UpdateGitEksaSpec(ctx context.Context, clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error {
if f.shouldSkipFlux() {
logger.Info("GitOps field not specified, update git repo skipped")
return nil
}
fc := newFluxForCluster(f, clusterSpec, datacenterConfig, machineConfigs)
if err := fc.syncGitRepo(ctx); err != nil {
return err
}
g := NewFileGenerator()
if err := g.Init(f.writer, fc.eksaSystemDir(), fc.fluxSystemDir()); err != nil {
return err
}
if err := g.WriteEksaFiles(clusterSpec, datacenterConfig, machineConfigs); err != nil {
return err
}
path := fc.eksaSystemDir()
if err := f.gitClient.Add(path); err != nil {
return fmt.Errorf("adding %s to git: %v", path, err)
}
if err := f.pushToRemoteRepo(ctx, path, updateClusterconfigCommitMessage); err != nil {
return err
}
logger.V(3).Info("Finished pushing updated cluster config file to git", "repository", fc.repository())
return nil
}
func (f *Flux) Validations(ctx context.Context, clusterSpec *cluster.Spec) []validations.Validation {
if f.shouldSkipFlux() {
return nil
}
fc := newFluxForCluster(f, clusterSpec, nil, nil)
return []validations.Validation{
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "Flux path",
Remediation: "Please provide a different path or different cluster name",
Err: fc.validateRemoteConfigPathDoesNotExist(ctx),
}
},
}
}
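// CleanupGitRepo removes this cluster's GitOps files from the repository: a
// managed (workload) cluster deletes only its eksa-system directory, while a
// self-managed (management) cluster deletes its entire cluster config path.
// The removal is skipped when the directory is absent from the local clone.
// (Doc comment added in this edit; summarized from the body below.)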
func (f *Flux) CleanupGitRepo(ctx context.Context, clusterSpec *cluster.Spec) error {
if f.shouldSkipFlux() {
logger.Info("GitOps field not specified, clean up git repo skipped")
return nil
}
fc := newFluxForCluster(f, clusterSpec, nil, nil)
if err := fc.syncGitRepo(ctx); err != nil {
return err
}
var p string
if clusterSpec.Cluster.IsManaged() {
p = fc.eksaSystemDir()
} else {
p = fc.path()
}
if !validations.FileExists(path.Join(f.writer.Dir(), p)) {
logger.V(3).Info("cluster dir does not exist in git, skip clean up")
return nil
}
if err := f.gitClient.Remove(p); err != nil {
return fmt.Errorf("removing %s in git: %v", p, err)
}
if err := f.pushToRemoteRepo(ctx, p, deleteClusterconfigCommitMessage); err != nil {
return err
}
logger.V(3).Info("Finished cleaning up cluster files in git",
"repository", fc.repository())
return nil
}
func (f *Flux) pushToRemoteRepo(ctx context.Context, path, msg string) error {
if err := f.gitClient.Commit(msg); err != nil {
return fmt.Errorf("committing %s to git: %v", path, err)
}
if err := f.gitClient.Push(ctx); err != nil {
return fmt.Errorf("pushing %s to git: %v", path, err)
}
return nil
}
func (f *Flux) shouldSkipFlux() bool {
return f.writer == nil
}
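// Illustrative wiring sketch (not part of the original source): constructing a
// Flux from pre-built clients and running the install flow. All variable names
// here are assumptions for the example.
//
//	f := NewFluxFromGitOpsFluxClient(fluxClient, gitClient, writer, cliConfig)
//	if err := f.InstallGitOps(ctx, cluster, clusterSpec, datacenterConfig, machineConfigs); err != nil {
//		return err
//	}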
| 317 |