repo_name
stringlengths 1
52
| repo_creator
stringclasses 6
values | programming_language
stringclasses 4
values | code
stringlengths 0
9.68M
| num_lines
int64 1
234k
|
---|---|---|---|---|
eks-anywhere | aws | Go | package e2e
import (
"os"
"regexp"
e2etests "github.com/aws/eks-anywhere/test/framework"
)
const (
packagesRegex = `^.*CuratedPackages.*$`
certManagerRegex = "^.*CuratedPackagesCertManager.*$"
)
func (e *E2ESession) setupPackagesEnv(testRegex string) error {
re := regexp.MustCompile(packagesRegex)
if !re.MatchString(testRegex) {
return nil
}
requiredEnvVars := e2etests.RequiredPackagesEnvVars()
for _, eVar := range requiredEnvVars {
if val, ok := os.LookupEnv(eVar); ok {
e.testEnvVars[eVar] = val
}
}
return nil
}
func (e *E2ESession) setupCertManagerEnv(testRegex string) error {
re := regexp.MustCompile(certManagerRegex)
if !re.MatchString(testRegex) {
return nil
}
requiredEnvVars := e2etests.RequiredCertManagerEnvVars()
for _, eVar := range requiredEnvVars {
if val, ok := os.LookupEnv(eVar); ok {
e.testEnvVars[eVar] = val
}
}
return nil
}
| 44 |
eks-anywhere | aws | Go | package e2e
import (
"fmt"
"os"
"reflect"
"regexp"
"strings"
e2etests "github.com/aws/eks-anywhere/test/framework"
)
var proxyVarsByProvider = map[string]e2etests.ProxyRequiredEnvVars{
"CloudStack": e2etests.CloudstackProxyRequiredEnvVars,
"VSphere": e2etests.VsphereProxyRequiredEnvVars,
"Tinkerbell": e2etests.TinkerbellProxyRequiredEnvVars,
}
func (e *E2ESession) setupProxyEnv(testRegex string) error {
re := regexp.MustCompile(`^.*Proxy.*$`)
if !re.MatchString(testRegex) {
return nil
}
var requiredEnvVars e2etests.ProxyRequiredEnvVars
for key, vars := range proxyVarsByProvider {
if strings.Contains(testRegex, key) {
requiredEnvVars = vars
break
}
}
if reflect.ValueOf(requiredEnvVars).IsZero() {
return fmt.Errorf("proxy config for provider test %s was not found", testRegex)
}
for _, eVar := range []string{requiredEnvVars.HttpProxy, requiredEnvVars.HttpsProxy, requiredEnvVars.NoProxy} {
if val, ok := os.LookupEnv(eVar); ok {
e.testEnvVars[eVar] = val
}
}
return nil
}
| 43 |
eks-anywhere | aws | Go | package e2e
import (
"encoding/base64"
"fmt"
"net"
"os"
"regexp"
"github.com/go-logr/logr"
"github.com/aws/eks-anywhere/internal/pkg/ssm"
e2etests "github.com/aws/eks-anywhere/test/framework"
)
func (e *E2ESession) setupRegistryMirrorEnv(testRegex string) error {
re := regexp.MustCompile(`^.*RegistryMirror.*$`)
if !re.MatchString(testRegex) {
return nil
}
requiredEnvVars := e2etests.RequiredRegistryMirrorEnvVars()
for _, eVar := range requiredEnvVars {
if val, ok := os.LookupEnv(eVar); ok {
e.testEnvVars[eVar] = val
}
}
endpoint := e.testEnvVars[e2etests.RegistryEndpointVar]
port := e.testEnvVars[e2etests.RegistryPortVar]
caCert := e.testEnvVars[e2etests.RegistryCACertVar]
// Since Tinkerbell uses a separate harbor registry,
// we need to setup cert for that registry for Tinkerbell tests.
re = regexp.MustCompile(`^.*Tinkerbell.*$`)
if re.MatchString(testRegex) {
endpoint = e.testEnvVars[e2etests.RegistryEndpointTinkerbellVar]
port = e.testEnvVars[e2etests.RegistryPortTinkerbellVar]
caCert = e.testEnvVars[e2etests.RegistryCACertTinkerbellVar]
if err := e.mountRegistryCert(caCert, net.JoinHostPort(endpoint, port)); err != nil {
return err
}
}
// Since Authenticated tests needs to use separate harbor registries.
re = regexp.MustCompile(`^.*(VSphere|CloudStack).*Authenticated.*$`)
if re.MatchString(testRegex) {
endpoint = e.testEnvVars[e2etests.PrivateRegistryEndpointVar]
port = e.testEnvVars[e2etests.PrivateRegistryPortVar]
caCert = e.testEnvVars[e2etests.PrivateRegistryCACertVar]
} else if re = regexp.MustCompile(`^.*Tinkerbell.*Authenticated.*$`); re.MatchString(testRegex) {
endpoint = e.testEnvVars[e2etests.PrivateRegistryEndpointTinkerbellVar]
port = e.testEnvVars[e2etests.PrivateRegistryPortTinkerbellVar]
caCert = e.testEnvVars[e2etests.PrivateRegistryCACertTinkerbellVar]
}
if endpoint != "" && port != "" && caCert != "" {
return e.mountRegistryCert(caCert, net.JoinHostPort(endpoint, port))
}
re = regexp.MustCompile(`^.*Docker.*Airgapped.*$`)
if re.MatchString(testRegex) {
err := os.Setenv("DEFAULT_SECURITY_GROUP", e.testEnvVars[e2etests.RegistryMirrorDefaultSecurityGroup])
if err != nil {
return fmt.Errorf("unable to set DEFAULT_SECURITY_GROUP: %v", err)
}
err = os.Setenv("AIRGAPPED_SECURITY_GROUP", e.testEnvVars[e2etests.RegistryMirrorAirgappedSecurityGroup])
if err != nil {
return fmt.Errorf("unable to set AIRGAPPED_SECURITY_GROUP: %v", err)
}
}
return nil
}
func (e *E2ESession) mountRegistryCert(cert string, endpoint string) error {
command := fmt.Sprintf("sudo mkdir -p /etc/docker/certs.d/%s", endpoint)
if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil {
return fmt.Errorf("creating directory in instance: %v", err)
}
decodedCert, err := base64.StdEncoding.DecodeString(cert)
if err != nil {
return fmt.Errorf("failed to decode certificate: %v", err)
}
command = fmt.Sprintf("sudo cat <<EOF>> /etc/docker/certs.d/%s/ca.crt\n%s\nEOF", endpoint, string(decodedCert))
if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil {
return fmt.Errorf("mounting certificate in instance: %v", err)
}
return err
}
| 95 |
eks-anywhere | aws | Go | package e2e
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/go-logr/logr"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/pkg/s3"
"github.com/aws/eks-anywhere/internal/pkg/ssm"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
const (
testResultPass = "pass"
testResultFail = "fail"
testResultError = "error"
maxIPPoolSize = 10
minIPPoolSize = 1
// Default timeout for E2E test instance.
e2eTimeout = 300 * time.Minute
e2eSSMTimeoutPadding = 10 * time.Minute
// Default timeout used for all SSM commands besides running the actual E2E test.
ssmTimeout = 10 * time.Minute
)
type ParallelRunConf struct {
TestInstanceConfigFile string
MaxInstances int
MaxConcurrentTests int
InstanceProfileName string
StorageBucket string
JobId string
Regex string
TestsToSkip []string
BundlesOverride bool
CleanupVms bool
TestReportFolder string
BranchName string
BaremetalBranchName string
Logger logr.Logger
}
type (
testCommandResult = ssm.RunOutput
instanceTestsResults struct {
conf instanceRunConf
testCommandResult *testCommandResult
err error
}
)
// RunTestsInParallel Run Tests in parallel by spawning multiple admin machines.
func RunTestsInParallel(conf ParallelRunConf) error {
testsList, skippedTests, err := listTests(conf.Regex, conf.TestsToSkip)
if err != nil {
return err
}
conf.Logger.Info("Running tests", "selected", testsList, "skipped", skippedTests)
if conf.TestReportFolder != "" {
if err = os.MkdirAll(conf.TestReportFolder, os.ModePerm); err != nil {
return err
}
}
var wg sync.WaitGroup
instancesConf, err := splitTests(testsList, conf)
if err != nil {
return fmt.Errorf("failed to split tests: %v", err)
}
results := make([]instanceTestsResults, 0, len(instancesConf))
logTestGroups(conf.Logger, instancesConf)
maxConcurrentTests := conf.MaxConcurrentTests
// Add a blocking channel to only allow for certain number of tests to run at a time
queue := make(chan struct{}, maxConcurrentTests)
for _, instanceConf := range instancesConf {
queue <- struct{}{}
go func(c instanceRunConf) {
defer wg.Done()
r := instanceTestsResults{conf: c}
r.conf.instanceId, r.testCommandResult, err = RunTests(c)
if err != nil {
r.err = err
}
results = append(results, r)
<-queue
}(instanceConf)
wg.Add(1)
}
wg.Wait()
close(queue)
failedInstances := 0
totalInstances := len(instancesConf)
completedInstances := 0
for _, r := range results {
var result string
// TODO: keeping the old logs temporarily for compatibility with the test tool
// Once the tool is updated to support the unified message, remove them
if r.err != nil {
result = testResultError
conf.Logger.Error(r.err, "Failed running e2e tests for instance", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "tests", r.conf.regex, "status", testResultFail)
failedInstances += 1
} else if !r.testCommandResult.Successful() {
result = testResultFail
conf.Logger.Info("An e2e instance run has failed", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "commandId", r.testCommandResult.CommandId, "tests", r.conf.regex, "status", testResultFail)
failedInstances += 1
} else {
result = testResultPass
conf.Logger.Info("Instance tests completed successfully", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "commandId", r.testCommandResult.CommandId, "tests", r.conf.regex, "status", testResultPass)
}
completedInstances += 1
conf.Logger.Info("Instance tests run finished",
"result", result,
"tests", r.conf.regex,
"jobId", r.conf.jobId,
"instanceId", r.conf.instanceId,
"completedInstances", completedInstances,
"totalInstances", totalInstances,
)
}
if failedInstances > 0 {
return fmt.Errorf("%d/%d e2e instances failed", failedInstances, totalInstances)
}
return nil
}
type instanceRunConf struct {
session *session.Session
instanceProfileName, storageBucket, jobId, parentJobId, regex, instanceId string
testReportFolder, branchName string
ipPool networkutils.IPPool
hardware []*api.Hardware
bundlesOverride bool
testRunnerType TestRunnerType
testRunnerConfig TestInfraConfig
cleanupVms bool
logger logr.Logger
}
func RunTests(conf instanceRunConf) (testInstanceID string, testCommandResult *testCommandResult, err error) {
testRunner, err := newTestRunner(conf.testRunnerType, conf.testRunnerConfig)
if err != nil {
return "", nil, err
}
instanceId, err := testRunner.createInstance(conf)
if err != nil {
return "", nil, err
}
conf.logger.V(1).Info("TestRunner instance has been created", "instanceId", instanceId)
defer func() {
err := testRunner.decommInstance(conf)
if err != nil {
conf.logger.V(1).Info("WARN: Failed to decomm e2e test runner instance", "error", err)
}
}()
session, err := newE2ESession(instanceId, conf)
if err != nil {
return "", nil, err
}
err = session.setup(conf.regex)
if err != nil {
return session.instanceId, nil, err
}
testCommandResult, err = session.runTests(conf.regex)
if err != nil {
return session.instanceId, nil, err
}
if err = conf.runPostTestsProcessing(session, testCommandResult); err != nil {
return session.instanceId, nil, err
}
// Tagging only successful e2e test instances.
// The aws cleanup periodic job deletes the tagged EC2 instances and long lived instances.
if testCommandResult.Successful() {
key := "Integration-Test-Done"
value := "TRUE"
err = testRunner.tagInstance(conf, key, value)
if err != nil {
return session.instanceId, nil, fmt.Errorf("tagging instance for e2e success: %v", err)
}
}
return session.instanceId, testCommandResult, nil
}
func (e *E2ESession) runTests(regex string) (testCommandResult *testCommandResult, err error) {
e.logger.V(1).Info("Running e2e tests", "regex", regex)
command := "GOVERSION=go1.16.6 gotestsum --junitfile=junit-testing.xml --raw-command --format=standard-verbose --hide-summary=all --ignore-non-json-output-lines -- test2json -t -p e2e ./bin/e2e.test -test.v"
if regex != "" {
command = fmt.Sprintf("%s -test.run \"^(%s)$\" -test.timeout %s", command, regex, e2eTimeout)
}
command = e.commandWithEnvVars(command)
opt := ssm.WithOutputToCloudwatch()
testCommandResult, err = ssm.RunCommand(
e.session,
e.logger.V(4),
e.instanceId,
command,
e2eTimeout+e2eSSMTimeoutPadding,
opt,
)
if err != nil {
return nil, fmt.Errorf("running e2e tests on instance %s: %v", e.instanceId, err)
}
return testCommandResult, nil
}
func (c instanceRunConf) runPostTestsProcessing(e *E2ESession, testCommandResult *testCommandResult) error {
regex := strings.Trim(c.regex, "\"")
tests := strings.Split(regex, "|")
for _, testName := range tests {
e.uploadJUnitReportFromInstance(testName)
if c.testReportFolder != "" {
e.downloadJUnitReportToLocalDisk(testName, c.testReportFolder)
}
if !testCommandResult.Successful() {
// For Tinkerbell tests we run multiple tests on the same instance.
// Hence upload fails for passed tests within the instance.
// TODO (pokearu): Find a way to only upload for failed tests within the instance.
e.uploadGeneratedFilesFromInstance(testName)
e.uploadDiagnosticArchiveFromInstance(testName)
}
}
return nil
}
func (e *E2ESession) commandWithEnvVars(command string) string {
fullCommand := make([]string, 0, len(e.testEnvVars)+1)
for k, v := range e.testEnvVars {
fullCommand = append(fullCommand, fmt.Sprintf("export %s=\"%s\"", k, v))
}
fullCommand = append(fullCommand, command)
return strings.Join(fullCommand, "; ")
}
func splitTests(testsList []string, conf ParallelRunConf) ([]instanceRunConf, error) {
testPerInstance := len(testsList) / conf.MaxInstances
if testPerInstance == 0 {
testPerInstance = 1
}
vsphereTestsRe := regexp.MustCompile(vsphereRegex)
tinkerbellTestsRe := regexp.MustCompile(tinkerbellTestsRe)
nutanixTestsRe := regexp.MustCompile(nutanixRegex)
privateNetworkTestsRe := regexp.MustCompile(`^.*(Proxy|RegistryMirror).*$`)
multiClusterTestsRe := regexp.MustCompile(`^.*Multicluster.*$`)
runConfs := make([]instanceRunConf, 0, conf.MaxInstances)
vsphereIPMan := newE2EIPManager(conf.Logger, os.Getenv(vsphereCidrVar))
vspherePrivateIPMan := newE2EIPManager(conf.Logger, os.Getenv(vspherePrivateNetworkCidrVar))
nutanixIPMan := newE2EIPManager(conf.Logger, os.Getenv(nutanixCidrVar))
awsSession, err := session.NewSession()
if err != nil {
return nil, fmt.Errorf("creating aws session for tests: %v", err)
}
testRunnerConfig, err := NewTestRunnerConfigFromFile(conf.Logger, conf.TestInstanceConfigFile)
if err != nil {
return nil, fmt.Errorf("creating test runner config for tests: %v", err)
}
testsInEC2Instance := make([]string, 0, testPerInstance)
for i, testName := range testsList {
if tinkerbellTestsRe.MatchString(testName) {
continue
}
testsInEC2Instance = append(testsInEC2Instance, testName)
multiClusterTest := multiClusterTestsRe.MatchString(testName)
var ips networkutils.IPPool
if vsphereTestsRe.MatchString(testName) {
if privateNetworkTestsRe.MatchString(testName) {
if multiClusterTest {
ips = vspherePrivateIPMan.reserveIPPool(maxIPPoolSize)
} else {
ips = vspherePrivateIPMan.reserveIPPool(minIPPoolSize)
}
} else {
if multiClusterTest {
ips = vsphereIPMan.reserveIPPool(maxIPPoolSize)
} else {
ips = vsphereIPMan.reserveIPPool(minIPPoolSize)
}
}
}
if nutanixTestsRe.MatchString(testName) {
ips = nutanixIPMan.reserveIPPool(minIPPoolSize)
}
if len(testsInEC2Instance) == testPerInstance || (len(testsList)-1) == i {
runConfs = append(runConfs, newInstanceRunConf(awsSession, conf, len(runConfs), strings.Join(testsInEC2Instance, "|"), ips, []*api.Hardware{}, Ec2TestRunnerType, testRunnerConfig))
testsInEC2Instance = make([]string, 0, testPerInstance)
}
}
if strings.EqualFold(conf.BranchName, conf.BaremetalBranchName) {
tinkerbellIPManager := newE2EIPManager(conf.Logger, os.Getenv(tinkerbellControlPlaneNetworkCidrEnvVar))
runConfs, err = appendNonAirgappedTinkerbellRunConfs(awsSession, testsList, conf, testRunnerConfig, runConfs, tinkerbellIPManager)
if err != nil {
return nil, fmt.Errorf("failed to split Tinkerbell tests: %v", err)
}
runConfs, err = appendAirgappedTinkerbellRunConfs(awsSession, testsList, conf, testRunnerConfig, runConfs, tinkerbellIPManager)
if err != nil {
return nil, fmt.Errorf("failed to run airgapped Tinkerbell tests: %v", err)
}
}
return runConfs, nil
}
//nolint:gocyclo // This legacy function is complex but the team too busy to simplify it
func appendNonAirgappedTinkerbellRunConfs(awsSession *session.Session, testsList []string, conf ParallelRunConf, testRunnerConfig *TestInfraConfig, runConfs []instanceRunConf, ipManager *E2EIPManager) ([]instanceRunConf, error) {
err := s3.DownloadToDisk(awsSession, os.Getenv(tinkerbellHardwareS3FileKeyEnvVar), conf.StorageBucket, e2eHardwareCsvFilePath)
if err != nil {
return nil, fmt.Errorf("failed to download tinkerbell hardware csv: %v", err)
}
hardware, err := api.NewHardwareSliceFromFile(e2eHardwareCsvFilePath)
if err != nil {
return nil, fmt.Errorf("failed to get Tinkerbell hardware: %v", err)
}
maxHardwarePerE2ETest := tinkerbellDefaultMaxHardwarePerE2ETest
maxHardwareEnvValue := os.Getenv(maxHardwarePerE2ETestEnvVar)
if len(maxHardwareEnvValue) > 0 {
maxHardwarePerE2ETest, err = strconv.Atoi(maxHardwareEnvValue)
if err != nil {
return nil, fmt.Errorf("failed to get Tinkerbell max hardware per test env var: %v", err)
}
}
conf.Logger.V(1).Info("INFO:", "totalHardware", len(hardware))
tinkerbellTests := getTinkerbellNonAirgappedTests(testsList)
conf.Logger.V(1).Info("INFO:", "tinkerbellTests", len(tinkerbellTests))
tinkTestInstances := len(hardware) / maxHardwarePerE2ETest
conf.Logger.V(1).Info("INFO:", "tinkTestInstances", tinkTestInstances)
tinkTestsPerInstance := 1
var remainingTests int
overflowTests := false
if len(tinkerbellTests) > tinkTestInstances {
tinkTestsPerInstance = len(tinkerbellTests) / tinkTestInstances
remainingTests = len(tinkerbellTests) % tinkTestInstances
if remainingTests != 0 {
tinkTestsPerInstance++
overflowTests = true
}
}
conf.Logger.V(1).Info("INFO:", "tinkTestsPerInstance", tinkTestsPerInstance)
conf.Logger.V(1).Info("INFO:", "tinkTestInstances", tinkTestInstances)
conf.Logger.V(1).Info("INFO:", "remainingTests", remainingTests)
hardwareChunks := api.SplitHardware(hardware, maxHardwarePerE2ETest)
testsInVSphereInstance := make([]string, 0, tinkTestsPerInstance)
for i, testName := range tinkerbellTests {
testsInVSphereInstance = append(testsInVSphereInstance, testName)
if len(testsInVSphereInstance) == tinkTestsPerInstance || (len(testsList)-1) == i {
conf.Logger.V(1).Info("INFO:", "hardwareChunksSize", len(hardwareChunks))
conf.Logger.V(1).Info("INFO:", "hardwareSize", len(hardware))
// each tinkerbell test requires 2 floating ip's (cp & tink server)
ips := ipManager.reserveIPPool(tinkTestsPerInstance * 2)
if len(hardwareChunks) > 0 {
hardware, hardwareChunks = hardwareChunks[0], hardwareChunks[1:]
}
runConfs = append(runConfs, newInstanceRunConf(awsSession, conf, len(runConfs), strings.Join(testsInVSphereInstance, "|"), ips, hardware, VSphereTestRunnerType, testRunnerConfig))
if remainingTests > 0 {
remainingTests--
}
if remainingTests == 0 && overflowTests {
tinkTestsPerInstance--
overflowTests = false
}
testsInVSphereInstance = make([]string, 0, tinkTestsPerInstance)
}
}
return runConfs, nil
}
func appendAirgappedTinkerbellRunConfs(awsSession *session.Session, testsList []string, conf ParallelRunConf, testRunnerConfig *TestInfraConfig, runConfs []instanceRunConf, ipManager *E2EIPManager) ([]instanceRunConf, error) {
tinkerbellTests := getTinkerbellAirgappedTests(testsList)
if len(tinkerbellTests) == 0 {
conf.Logger.V(1).Info("No tinkerbell airgapped test to run")
return runConfs, nil
}
conf.Logger.V(1).Info("INFO:", "tinkerbellAirGappedTests", len(tinkerbellTests))
err := s3.DownloadToDisk(awsSession, os.Getenv(tinkerbellAirgappedHardwareS3FileKeyEnvVar), conf.StorageBucket, e2eAirgappedHardwareCsvFilePath)
if err != nil {
return nil, fmt.Errorf("downloading tinkerbell airgapped hardware csv: %v", err)
}
hardware, err := api.NewHardwareSliceFromFile(e2eAirgappedHardwareCsvFilePath)
if err != nil {
return nil, fmt.Errorf("failed to get Tinkerbell hardware: %v", err)
}
conf.Logger.V(1).Info("INFO:", "totalAirgappedHardware", len(hardware))
pool := ipManager.reserveIPPool(len(tinkerbellTests) * 2)
runConfs = append(runConfs, newInstanceRunConf(awsSession, conf, len(runConfs), strings.Join(tinkerbellTests, "|"), pool, hardware, VSphereTestRunnerType, testRunnerConfig))
return runConfs, nil
}
func newInstanceRunConf(awsSession *session.Session, conf ParallelRunConf, jobNumber int, testRegex string, ipPool networkutils.IPPool, hardware []*api.Hardware, testRunnerType TestRunnerType, testRunnerConfig *TestInfraConfig) instanceRunConf {
jobID := fmt.Sprintf("%s-%d", conf.JobId, jobNumber)
return instanceRunConf{
session: awsSession,
instanceProfileName: conf.InstanceProfileName,
storageBucket: conf.StorageBucket,
jobId: jobID,
parentJobId: conf.JobId,
regex: testRegex,
ipPool: ipPool,
hardware: hardware,
bundlesOverride: conf.BundlesOverride,
testReportFolder: conf.TestReportFolder,
branchName: conf.BranchName,
cleanupVms: conf.CleanupVms,
testRunnerType: testRunnerType,
testRunnerConfig: *testRunnerConfig,
logger: conf.Logger.WithValues("jobID", jobID, "test", testRegex),
}
}
func logTestGroups(logger logr.Logger, instancesConf []instanceRunConf) {
testGroups := make([]string, 0, len(instancesConf))
for _, i := range instancesConf {
testGroups = append(testGroups, i.regex)
}
logger.V(1).Info("Running tests in parallel", "testsGroups", testGroups)
}
| 485 |
eks-anywhere | aws | Go | package e2e
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/go-logr/logr"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/pkg/s3"
"github.com/aws/eks-anywhere/internal/pkg/ssm"
"github.com/aws/eks-anywhere/pkg/networkutils"
e2etests "github.com/aws/eks-anywhere/test/framework"
)
var requiredFiles = []string{cliBinary, e2eBinary}
const (
cliBinary = "eksctl-anywhere"
e2eBinary = "e2e.test"
bundlesReleaseManifestFile = "local-bundle-release.yaml"
eksAComponentsManifestFile = "local-eksa-components.yaml"
testNameFile = "e2e-test-name"
maxUserWatches = 524288
maxUserInstances = 512
key = "Integration-Test"
tag = "EKSA-E2E"
)
type E2ESession struct {
session *session.Session
instanceProfileName string
storageBucket string
jobId string
instanceId string
ipPool networkutils.IPPool
testEnvVars map[string]string
bundlesOverride bool
cleanupVms bool
requiredFiles []string
branchName string
hardware []*api.Hardware
logger logr.Logger
}
func newE2ESession(instanceId string, conf instanceRunConf) (*E2ESession, error) {
e := &E2ESession{
session: conf.session,
instanceId: instanceId,
instanceProfileName: conf.instanceProfileName,
storageBucket: conf.storageBucket,
jobId: conf.jobId,
ipPool: conf.ipPool,
testEnvVars: make(map[string]string),
bundlesOverride: conf.bundlesOverride,
cleanupVms: conf.cleanupVms,
requiredFiles: requiredFiles,
branchName: conf.branchName,
hardware: conf.hardware,
logger: conf.logger,
}
return e, nil
}
func (e *E2ESession) setup(regex string) error {
err := e.uploadRequiredFiles()
if err != nil {
return err
}
e.logger.V(1).Info("Waiting until SSM is ready")
err = ssm.WaitForSSMReady(e.session, e.instanceId, ssmTimeout)
if err != nil {
return fmt.Errorf("waiting for ssm in new instance: %v", err)
}
err = e.updateFSInotifyResources()
if err != nil {
return err
}
err = e.createTestNameFile(regex)
if err != nil {
return err
}
err = e.downloadRequiredFilesInInstance()
if err != nil {
return err
}
err = e.setupOIDC(regex)
if err != nil {
return err
}
err = e.setupVSphereEnv(regex)
if err != nil {
return err
}
err = e.setupTinkerbellEnv(regex)
if err != nil {
return err
}
err = e.setupCloudStackEnv(regex)
if err != nil {
return err
}
err = e.setupNutanixEnv(regex)
if err != nil {
return err
}
err = e.setupSnowEnv(regex)
if err != nil {
return err
}
err = e.setupFluxEnv(regex)
if err != nil {
return err
}
err = e.setupFluxGitEnv(regex)
if err != nil {
return err
}
err = e.setupProxyEnv(regex)
if err != nil {
return err
}
err = e.setupRegistryMirrorEnv(regex)
if err != nil {
return err
}
err = e.setupAwsIam(regex)
if err != nil {
return err
}
err = e.setupNTPEnv(regex)
if err != nil {
return err
}
err = e.setupBottlerocketKubernetesSettingsEnv(regex)
if err != nil {
return err
}
err = e.setupPackagesEnv(regex)
if err != nil {
return err
}
err = e.setupCertManagerEnv(regex)
if err != nil {
return err
}
ipPool := e.ipPool.ToString()
if ipPool != "" {
e.testEnvVars[e2etests.ClusterIPPoolEnvVar] = ipPool
}
// Adding JobId to Test Env variables
e.testEnvVars[e2etests.JobIdVar] = e.jobId
e.testEnvVars[e2etests.BundlesOverrideVar] = strconv.FormatBool(e.bundlesOverride)
e.testEnvVars[e2etests.CleanupVmsVar] = strconv.FormatBool(e.cleanupVms)
if e.branchName != "" {
e.testEnvVars[e2etests.BranchNameEnvVar] = e.branchName
}
e.testEnvVars[e2etests.ClusterPrefixVar] = clusterPrefix(e.branchName, e.instanceId)
return nil
}
func (e *E2ESession) updateFSInotifyResources() error {
command := fmt.Sprintf("sudo sysctl fs.inotify.max_user_watches=%v && sudo sysctl fs.inotify.max_user_instances=%v", maxUserWatches, maxUserInstances)
if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil {
return fmt.Errorf("updating fs inotify resources: %v", err)
}
e.logger.V(1).Info("Successfully updated the fs inotify user watches and instances")
return nil
}
func (e *E2ESession) uploadRequiredFile(file string) error {
uploadFile := fmt.Sprintf("bin/%s", file)
key := fmt.Sprintf("%s/%s", e.jobId, file)
e.logger.V(1).Info("Uploading file to s3 bucket", "file", file, "key", key)
err := s3.UploadFile(e.session, uploadFile, key, e.storageBucket)
if err != nil {
return fmt.Errorf("uploading file [%s]: %v", file, err)
}
return nil
}
func (e *E2ESession) uploadRequiredFiles() error {
if e.bundlesOverride {
e.requiredFiles = append(e.requiredFiles, bundlesReleaseManifestFile)
if _, err := os.Stat(fmt.Sprintf("bin/%s", eksAComponentsManifestFile)); err == nil {
e.requiredFiles = append(e.requiredFiles, eksAComponentsManifestFile)
} else if errors.Is(err, os.ErrNotExist) {
e.logger.V(0).Info("WARNING: no components manifest override found, but bundle override is present. " +
"If the EKS-A components have changed be sure to provide a components override!")
} else {
return err
}
}
for _, file := range e.requiredFiles {
err := e.uploadRequiredFile(file)
if err != nil {
return err
}
}
return nil
}
func (e *E2ESession) downloadRequiredFileInInstance(file string) error {
e.logger.V(1).Info("Downloading from s3 in instance", "file", file)
command := fmt.Sprintf("aws s3 cp s3://%s/%s/%[3]s ./bin/ && chmod 645 ./bin/%[3]s", e.storageBucket, e.jobId, file)
if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil {
return fmt.Errorf("downloading file in instance: %v", err)
}
e.logger.V(1).Info("Successfully downloaded file")
return nil
}
func (e *E2ESession) downloadRequiredFilesInInstance() error {
for _, file := range e.requiredFiles {
err := e.downloadRequiredFileInInstance(file)
if err != nil {
return err
}
}
return nil
}
func (e *E2ESession) createTestNameFile(testName string) error {
command := fmt.Sprintf("echo \"%s\" > %s", testName, testNameFile)
if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil {
return fmt.Errorf("creating test name file in instance: %v", err)
}
e.logger.V(1).Info("Successfully created test name file")
return nil
}
func clusterPrefix(branch, instanceId string) (clusterPrefix string) {
if branch == "" {
return instanceId
}
forbiddenChars := []string{"."}
sanitizedBranch := strings.ToLower(branch)
for _, char := range forbiddenChars {
sanitizedBranch = strings.ReplaceAll(sanitizedBranch, char, "-")
}
if len(sanitizedBranch) > 7 {
sanitizedBranch = sanitizedBranch[:7]
}
if len(instanceId) > 7 {
instanceId = instanceId[:7]
}
clusterPrefix = fmt.Sprintf("%s-%s", sanitizedBranch, instanceId)
return clusterPrefix
}
func (e *E2ESession) clusterName(branch, instanceId, testName string) (clusterName string) {
clusterName = fmt.Sprintf("%s-%s", clusterPrefix(branch, instanceId), e2etests.GetTestNameHash(testName))
if len(clusterName) > 63 {
e.logger.Info("Cluster name is longer than 63 characters; truncating to 63 characters.", "original cluster name", clusterName, "truncated cluster name", clusterName[:63])
clusterName = clusterName[:63]
}
return clusterName
}
| 297 |
eks-anywhere | aws | Go | package e2e
import (
"fmt"
"os"
"regexp"
"strings"
"sync"
"github.com/aws/eks-anywhere/internal/pkg/s3"
)
const (
snowCredentialsS3Path = "T_SNOW_CREDENTIALS_S3_PATH"
snowCertificatesS3Path = "T_SNOW_CERTIFICATES_S3_PATH"
snowDevices = "T_SNOW_DEVICES"
snowCPCidr = "T_SNOW_CONTROL_PLANE_CIDR"
snowCPCidrs = "T_SNOW_CONTROL_PLANE_CIDRS"
snowCredsFile = "EKSA_AWS_CREDENTIALS_FILE"
snowCertsFile = "EKSA_AWS_CA_BUNDLES_FILE"
snowTestsRe = `^.*Snow.*$`
snowCredsFilename = "snow_creds"
snowCertsFilename = "snow_certs"
)
var (
snowCPCidrArray []string
snowCPCidrArrayM sync.Mutex
)
func init() {
snowCPCidrArray = strings.Split(os.Getenv(snowCPCidrs), ",")
}
// Note that this function cannot be called more than the the number of cidrs in the list.
func getSnowCPCidr() (string, error) {
snowCPCidrArrayM.Lock()
defer snowCPCidrArrayM.Unlock()
if len(snowCPCidrArray) == 0 {
return "", fmt.Errorf("no more snow control plane cidrs available")
}
var r string
r, snowCPCidrArray = snowCPCidrArray[0], snowCPCidrArray[1:]
return r, nil
}
func (e *E2ESession) setupSnowEnv(testRegex string) error {
re := regexp.MustCompile(snowTestsRe)
if !re.MatchString(testRegex) {
return nil
}
e.testEnvVars[snowDevices] = os.Getenv(snowDevices)
cpCidr, err := getSnowCPCidr()
if err != nil {
return err
}
e.testEnvVars[snowCPCidr] = cpCidr
e.logger.V(1).Info("Assigned control plane CIDR to admin instance", "cidr", cpCidr, "instanceId", e.instanceId)
if err := sendFileViaS3(e, os.Getenv(snowCredentialsS3Path), snowCredsFilename); err != nil {
return err
}
if err := sendFileViaS3(e, os.Getenv(snowCertificatesS3Path), snowCertsFilename); err != nil {
return err
}
e.testEnvVars[snowCredsFile] = "bin/" + snowCredsFilename
e.testEnvVars[snowCertsFile] = "bin/" + snowCertsFilename
return nil
}
func sendFileViaS3(e *E2ESession, s3Path string, filename string) error {
if err := s3.DownloadToDisk(e.session, s3Path, e.storageBucket, "bin/"+filename); err != nil {
return err
}
err := e.uploadRequiredFile(filename)
if err != nil {
return fmt.Errorf("failed to upload file (%s) : %v", filename, err)
}
err = e.downloadRequiredFileInInstance(filename)
if err != nil {
return fmt.Errorf("failed to download file (%s) in admin instance : %v", filename, err)
}
return nil
}
| 91 |
eks-anywhere | aws | Go | package e2e
import (
"context"
"fmt"
"os"
"strconv"
"time"
aws_ssm "github.com/aws/aws-sdk-go/service/ssm"
"github.com/go-logr/logr"
"gopkg.in/yaml.v2"
"github.com/aws/eks-anywhere/internal/pkg/ec2"
"github.com/aws/eks-anywhere/internal/pkg/ssm"
"github.com/aws/eks-anywhere/internal/pkg/vsphere"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/retrier"
)
const (
testRunnerVCUserEnvVar string = "TEST_RUNNER_GOVC_USERNAME"
testRunnerVCPasswordEnvVar string = "TEST_RUNNER_GOVC_PASSWORD"
govcUsernameKey string = "GOVC_USERNAME"
govcPasswordKey string = "GOVC_PASSWORD"
govcURLKey string = "GOVC_URL"
govcInsecure string = "GOVC_INSECURE"
govcDatacenterKey string = "GOVC_DATACENTER"
ssmActivationCodeKey string = "ssm_activation_code"
ssmActivationIdKey string = "ssm_activation_id"
ssmActivationRegionKey string = "ssm_activation_region"
)
type TestRunner interface {
createInstance(instanceConf instanceRunConf) (string, error)
tagInstance(instanceConf instanceRunConf, key, value string) error
decommInstance(instanceRunConf) error
}
type TestRunnerType string
const (
Ec2TestRunnerType TestRunnerType = "ec2"
VSphereTestRunnerType TestRunnerType = "vSphere"
)
func newTestRunner(runnerType TestRunnerType, config TestInfraConfig) (TestRunner, error) {
if runnerType == VSphereTestRunnerType {
var err error
v := &config.VSphereTestRunner
v.envMap, err = v.setEnvironment()
if err != nil {
return nil, fmt.Errorf("failed to set env for vSphere test runner: %v", err)
}
return v, nil
} else {
return &config.Ec2TestRunner, nil
}
}
type TestInfraConfig struct {
Ec2TestRunner `yaml:"ec2,omitempty"`
VSphereTestRunner `yaml:"vSphere,omitempty"`
}
func NewTestRunnerConfigFromFile(logger logr.Logger, configFile string) (*TestInfraConfig, error) {
file, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("failed to create test runner config from file: %v", err)
}
config := TestInfraConfig{}
config.VSphereTestRunner.logger = logger
config.Ec2TestRunner.logger = logger
err = yaml.Unmarshal(file, &config)
if err != nil {
return nil, fmt.Errorf("failed to create test runner config from file: %v", err)
}
return &config, nil
}
type testRunner struct {
InstanceID string
logger logr.Logger
}
type Ec2TestRunner struct {
testRunner
AmiID string `yaml:"amiId"`
SubnetID string `yaml:"subnetId"`
}
type VSphereTestRunner struct {
testRunner
ActivationId string
envMap map[string]string
Url string `yaml:"url"`
Insecure bool `yaml:"insecure"`
Library string `yaml:"library"`
Template string `yaml:"template"`
Datacenter string `yaml:"datacenter"`
Datastore string `yaml:"datastore"`
ResourcePool string `yaml:"resourcePool"`
Network string `yaml:"network"`
Folder string `yaml:"folder"`
}
func (v *VSphereTestRunner) setEnvironment() (map[string]string, error) {
envMap := make(map[string]string)
if vSphereUsername, ok := os.LookupEnv(testRunnerVCUserEnvVar); ok && len(vSphereUsername) > 0 {
envMap[govcUsernameKey] = vSphereUsername
} else {
return nil, fmt.Errorf("missing environment variable: %s", testRunnerVCUserEnvVar)
}
if vSpherePassword, ok := os.LookupEnv(testRunnerVCPasswordEnvVar); ok && len(vSpherePassword) > 0 {
envMap[govcPasswordKey] = vSpherePassword
} else {
return nil, fmt.Errorf("missing environment variable: %s", testRunnerVCPasswordEnvVar)
}
envMap[govcURLKey] = v.Url
envMap[govcInsecure] = strconv.FormatBool(v.Insecure)
envMap[govcDatacenterKey] = v.Datacenter
v.envMap = envMap
return envMap, nil
}
func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) {
name := getTestRunnerName(v.logger, c.jobId)
v.logger.V(1).Info("Creating vSphere Test Runner instance", "name", name)
ssmActivationInfo, err := ssm.CreateActivation(c.session, name, c.instanceProfileName)
if err != nil {
return "", fmt.Errorf("unable to create ssm activation: %v", err)
}
// TODO: import ova template from url if not exist
opts := vsphere.OVFDeployOptions{
Name: name,
PowerOn: true,
DiskProvisioning: "thin",
WaitForIP: true,
InjectOvfEnv: true,
NetworkMappings: []vsphere.NetworkMapping{{Name: v.Network, Network: v.Network}},
PropertyMapping: []vsphere.OVFProperty{
{Key: ssmActivationCodeKey, Value: ssmActivationInfo.ActivationCode},
{Key: ssmActivationIdKey, Value: ssmActivationInfo.ActivationID},
{Key: ssmActivationRegionKey, Value: *c.session.Config.Region},
},
}
// deploy template
if err := vsphere.DeployTemplate(v.envMap, v.Library, v.Template, name, v.Folder, v.Datacenter, v.Datastore, v.ResourcePool, opts); err != nil {
return "", err
}
var ssmInstance *aws_ssm.InstanceInformation
err = retrier.Retry(10, 5*time.Second, func() error {
ssmInstance, err = ssm.GetInstanceByActivationId(c.session, ssmActivationInfo.ActivationID)
if err != nil {
return fmt.Errorf("failed to get ssm instance info post ovf deployment: %v", err)
}
return nil
})
if err != nil {
return "", fmt.Errorf("waiting for ssm instance to activate %s : %v", name, err)
}
v.InstanceID = *ssmInstance.InstanceId
v.ActivationId = ssmActivationInfo.ActivationID
return *ssmInstance.InstanceId, nil
}
func (e *Ec2TestRunner) createInstance(c instanceRunConf) (string, error) {
name := getTestRunnerName(e.logger, c.jobId)
e.logger.V(1).Info("Creating ec2 Test Runner instance", "name", name)
instanceId, err := ec2.CreateInstance(c.session, e.AmiID, key, tag, c.instanceProfileName, e.SubnetID, name)
if err != nil {
return "", fmt.Errorf("creating instance for e2e tests: %v", err)
}
e.logger.V(1).Info("Instance created", "instance-id", instanceId)
e.InstanceID = instanceId
return instanceId, nil
}
func (v *VSphereTestRunner) tagInstance(c instanceRunConf, key, value string) error {
vmName := getTestRunnerName(v.logger, c.jobId)
vmPath := fmt.Sprintf("/%s/vm/%s/%s", v.Datacenter, v.Folder, vmName)
tag := fmt.Sprintf("%s:%s", key, value)
if err := vsphere.TagVirtualMachine(v.envMap, vmPath, tag); err != nil {
return fmt.Errorf("failed to tag vSphere test runner: %v", err)
}
return nil
}
func (e *Ec2TestRunner) tagInstance(c instanceRunConf, key, value string) error {
err := ec2.TagInstance(c.session, e.InstanceID, key, value)
if err != nil {
return fmt.Errorf("failed to tag Ec2 test runner: %v", err)
}
return nil
}
func (v *VSphereTestRunner) decommInstance(c instanceRunConf) error {
_, deregisterError := ssm.DeregisterInstance(c.session, v.InstanceID)
_, deactivateError := ssm.DeleteActivation(c.session, v.ActivationId)
deleteError := cleanup.VsphereRmVms(context.Background(), getTestRunnerName(v.logger, c.jobId), executables.WithGovcEnvMap(v.envMap))
if deregisterError != nil {
return fmt.Errorf("failed to decommission vsphere test runner ssm instance: %v", deregisterError)
}
if deactivateError != nil {
return fmt.Errorf("failed to decommission vsphere test runner ssm instance: %v", deactivateError)
}
if deleteError != nil {
return fmt.Errorf("failed to decommission vsphere test runner ssm instance: %v", deleteError)
}
return nil
}
func (v *Ec2TestRunner) decommInstance(c instanceRunConf) error {
return nil
}
func getTestRunnerName(logger logr.Logger, jobId string) string {
name := fmt.Sprintf("eksa-e2e-%s", jobId)
if len(name) > 80 {
logger.V(1).Info("Truncating test runner name to 80 chars", "original_name", name)
name = name[len(name)-80:]
logger.V(1).Info("Truncated test runner name", "truncated_name", name)
}
return name
}
| 245 |
eks-anywhere | aws | Go | package e2e
import (
"fmt"
"os"
"regexp"
"github.com/go-logr/logr"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/pkg/ssm"
e2etests "github.com/aws/eks-anywhere/test/framework"
)
const (
tinkerbellInventoryCsvFilePathEnvVar = "T_TINKERBELL_INVENTORY_CSV"
tinkerbellControlPlaneNetworkCidrEnvVar = "T_TINKERBELL_CP_NETWORK_CIDR"
tinkerbellHardwareS3FileKeyEnvVar = "T_TINKERBELL_S3_INVENTORY_CSV_KEY"
tinkerbellAirgappedHardwareS3FileKeyEnvVar = "T_TINKERBELL_S3_AG_INVENTORY_CSV_KEY"
tinkerbellTestsRe = `^.*Tinkerbell.*$`
e2eHardwareCsvFilePath = "e2e-inventory.csv"
e2eAirgappedHardwareCsvFilePath = "e2e-ag-inventory.csv"
maxHardwarePerE2ETestEnvVar = "T_TINKERBELL_MAX_HARDWARE_PER_TEST"
tinkerbellDefaultMaxHardwarePerE2ETest = 4
tinkerbellBootstrapInterfaceEnvVar = "T_TINKERBELL_BOOTSTRAP_INTERFACE"
)
func (e *E2ESession) setupTinkerbellEnv(testRegex string) error {
re := regexp.MustCompile(tinkerbellTestsRe)
if !re.MatchString(testRegex) {
return nil
}
requiredEnvVars := e2etests.RequiredTinkerbellEnvVars()
for _, eVar := range requiredEnvVars {
if val, ok := os.LookupEnv(eVar); ok {
e.testEnvVars[eVar] = val
}
}
inventoryFileName := fmt.Sprintf("%s.csv", getTestRunnerName(e.logger, e.jobId))
inventoryFilePath := fmt.Sprintf("bin/%s", inventoryFileName)
if _, err := os.Stat(inventoryFilePath); err == nil {
err = os.Remove(inventoryFilePath)
if err != nil {
e.logger.V(1).Info("WARN: Failed to clean up existing inventory csv", "file", inventoryFilePath)
}
}
err := api.WriteHardwareSliceToCSV(e.hardware, inventoryFilePath)
if err != nil {
return fmt.Errorf("failed to setup tinkerbell test environment: %v", err)
}
err = e.uploadRequiredFile(inventoryFileName)
if err != nil {
return fmt.Errorf("failed to upload tinkerbell inventory file (%s) : %v", inventoryFileName, err)
}
err = e.downloadRequiredFileInInstance(inventoryFileName)
if err != nil {
return fmt.Errorf("failed to download tinkerbell inventory file (%s) to test instance : %v", inventoryFileName, err)
}
tinkInterface := os.Getenv(tinkerbellBootstrapInterfaceEnvVar)
if tinkInterface == "" {
return fmt.Errorf("tinkerbell bootstrap interface env var is required: %s", tinkerbellBootstrapInterfaceEnvVar)
}
err = e.setTinkerbellBootstrapIPInInstance(tinkInterface)
if err != nil {
return fmt.Errorf("failed to set tinkerbell boostrap ip on interface (%s) in test instance : %v", tinkInterface, err)
}
e.testEnvVars[tinkerbellInventoryCsvFilePathEnvVar] = inventoryFilePath
e.testEnvVars[e2etests.TinkerbellCIEnvironment] = "true"
return nil
}
func (e *E2ESession) setTinkerbellBootstrapIPInInstance(tinkInterface string) error {
e.logger.V(1).Info("Setting Tinkerbell Bootstrap IP in instance")
command := fmt.Sprintf("export T_TINKERBELL_BOOTSTRAP_IP=$(/sbin/ip -o -4 addr list %s | awk '{print $4}' | cut -d/ -f1) && echo T_TINKERBELL_BOOTSTRAP_IP=\"$T_TINKERBELL_BOOTSTRAP_IP\" | tee -a /etc/environment", tinkInterface)
if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil {
return fmt.Errorf("setting tinkerbell boostrap ip: %v", err)
}
e.logger.V(1).Info("Successfully set tinkerbell boostrap ip")
return nil
}
// Get non airgapped, normal tinkerbell tests.
func getTinkerbellNonAirgappedTests(tests []string) []string {
tinkerbellTestsRe := regexp.MustCompile(tinkerbellTestsRe)
airgappedRe := regexp.MustCompile(`^.*Airgapped.*$`)
var tinkerbellTests []string
for _, testName := range tests {
if tinkerbellTestsRe.MatchString(testName) && !airgappedRe.MatchString(testName) {
tinkerbellTests = append(tinkerbellTests, testName)
}
}
return tinkerbellTests
}
func getTinkerbellAirgappedTests(tests []string) []string {
tinkerbellTestsRe := regexp.MustCompile(tinkerbellTestsRe)
airgappedRe := regexp.MustCompile(`^.*Airgapped.*$`)
var tinkerbellTests []string
for _, testName := range tests {
if tinkerbellTestsRe.MatchString(testName) && airgappedRe.MatchString(testName) {
tinkerbellTests = append(tinkerbellTests, testName)
}
}
return tinkerbellTests
}
| 121 |
eks-anywhere | aws | Go | package e2e
import (
"fmt"
"os"
"regexp"
"strings"
e2etests "github.com/aws/eks-anywhere/test/framework"
)
const (
vsphereCidrVar = "T_VSPHERE_CIDR"
vspherePrivateNetworkCidrVar = "T_VSPHERE_PRIVATE_NETWORK_CIDR"
vsphereRegex = `^.*VSphere.*$`
)
func (e *E2ESession) setupVSphereEnv(testRegex string) error {
re := regexp.MustCompile(vsphereRegex)
if !re.MatchString(testRegex) {
return nil
}
requiredEnvVars := e2etests.RequiredVsphereEnvVars()
for _, eVar := range requiredEnvVars {
if val, ok := os.LookupEnv(eVar); ok {
e.testEnvVars[eVar] = val
}
}
// This algorithm is not very efficient with two nested loops
// Making the assumption that VSphereExtraEnvVarPrefixes() returns a very small number of prefixes
// this should be ok and probably not worth the complexity of building a more complex data structure.
// If in the future we see the need to have a bigger number of prefixes, we will need
// to change this to avoid the n*m complexity
envVars := os.Environ()
for _, envVarPrefix := range e2etests.VSphereExtraEnvVarPrefixes() {
for _, envVar := range envVars {
if strings.HasPrefix(envVar, envVarPrefix) {
split := strings.Split(envVar, "=")
if len(split) != 2 {
return fmt.Errorf("invalid vsphere env var format, expected key=value: %s", envVar)
}
key := split[0]
value := split[1]
e.testEnvVars[key] = value
}
}
}
return nil
}
| 54 |
eks-anywhere | aws | Go | package e2e
import (
"testing"
. "github.com/onsi/gomega"
)
func TestE2ESessionsetupVSphereEnv(t *testing.T) {
g := NewWithT(t)
session := E2ESession{
testEnvVars: map[string]string{},
}
t.Setenv("T_VSPHERE_DATACENTER", "my-datacenter")
t.Setenv("T_VSPHERE_TEMPLATE_UBUNTU_KUBERNETES_1_20_EKS_19", "template-1-20")
t.Setenv("T_VSPHERE_TEMPLATE_UBUNTU_KUBERNETES_1_22_EKS_9", "template-1-22")
t.Setenv("T_VSPHERE_1_22_EKS_9", "template-1-22") // shouldn't be added
g.Expect(session.setupVSphereEnv("TestVSphere")).To(Succeed())
g.Expect(session.testEnvVars).To(HaveKeyWithValue("T_VSPHERE_DATACENTER", "my-datacenter"))
g.Expect(session.testEnvVars).To(HaveKeyWithValue("T_VSPHERE_TEMPLATE_UBUNTU_KUBERNETES_1_20_EKS_19", "template-1-20"))
g.Expect(session.testEnvVars).To(HaveKeyWithValue("T_VSPHERE_TEMPLATE_UBUNTU_KUBERNETES_1_22_EKS_9", "template-1-22"))
g.Expect(session.testEnvVars).NotTo(HaveKey("T_VSPHERE_1_22_EKS_9"))
}
| 27 |
eks-anywhere | aws | Go | package envtest
import (
"context"
"errors"
"testing"
"time"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// CreateObjs creates Objects using the provided kube client and waits until its cache
// has been updated with those objects.
func CreateObjs(ctx context.Context, t testing.TB, c client.Client, objs ...client.Object) {
t.Helper()
for _, o := range objs {
// we copy objects because the client modifies them while making creating/updating calls
obj := copyObject(t, o)
if err := c.Create(ctx, obj); isNamespace(obj) && apierrors.IsAlreadyExists(err) {
// namespaces can't be deleted
// assuming most tests just want the namespace to exist, since it already does
// we ignore the error
// for more advance usecases, handle namespaces manually outside of this helper
continue
} else if err != nil {
t.Fatal(err)
}
}
newStatuses := []updatedStatus{}
noStatusObjs := []client.Object{}
for _, o := range objs {
newStatus := updateStatus(ctx, t, c, o)
if newStatus != nil {
newStatuses = append(newStatuses, updatedStatus{
obj: o,
newStatus: newStatus,
})
} else {
noStatusObjs = append(noStatusObjs, o)
}
}
// If the status doesn't need to be updated, just wait for the object to
// to be available.
for _, o := range noStatusObjs {
waitForObjectAvailable(ctx, t, c, o)
}
for _, u := range newStatuses {
waitForStatusUpdated(ctx, t, c, u.obj, u.newStatus)
}
}
type updatedStatus struct {
obj client.Object
newStatus map[string]interface{}
}
// UpdateStatusAndWait updates an objects status subresource and waits until the cache refreshes
// and reflects the new status.
func UpdateStatusAndWait(ctx context.Context, t testing.TB, c client.Client, o client.Object) {
newStatus := updateStatus(ctx, t, c, o)
if newStatus != nil {
waitForStatusUpdated(ctx, t, c, o, newStatus)
}
}
func updateStatus(ctx context.Context, t testing.TB, c client.Client, o client.Object) (newStatus map[string]interface{}) {
objUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o)
if err != nil {
t.Fatalf("Failed converting object %s to unstructured: %v", klog.KObj(o), err)
}
obj := &unstructured.Unstructured{Object: objUnstructured}
newStatus, found, err := unstructured.NestedMap(objUnstructured, "status")
if err != nil {
t.Fatalf("Failed checking status for object %s: %v", klog.KObj(obj), err)
}
if !found || len(newStatus) == 0 {
return nil
}
objReady := waitForObjectAvailable(ctx, t, c, obj)
// We need to update the status independently, kubernetes doesn't allow to create the main objects and
// its subresources all at once
obj.SetResourceVersion(objReady.GetResourceVersion())
if err := c.Status().Update(ctx, obj); apierrors.IsNotFound(err) {
// Some objects without a subresource will fail here,
// so we just try and if it fails with a 404, we ignore the error
t.Logf(
"Try updating status but failed with a 404 error for [%s name=%s namespace=%s] object, most probably because it doesn't have a defined status subresource",
obj.GetObjectKind().GroupVersionKind().String(),
obj.GetName(),
obj.GetNamespace(),
)
} else if err != nil {
t.Fatal(err)
}
return newStatus
}
func waitForStatusUpdated(ctx context.Context, t testing.TB, c client.Client, o client.Object, newStatus map[string]interface{}) {
g := gomega.NewWithT(t)
g.Eventually(func(g gomega.Gomega) error {
updatedObj := &unstructured.Unstructured{}
updatedObj.SetGroupVersionKind(o.GetObjectKind().GroupVersionKind())
g.Expect(
c.Get(ctx, types.NamespacedName{Name: o.GetName(), Namespace: o.GetNamespace()}, updatedObj),
).To(gomega.Succeed())
updatedStatus, found, err := unstructured.NestedMap(updatedObj.Object, "status")
g.Expect(err).NotTo(gomega.HaveOccurred())
if !found {
return errors.New("no status found in updated object")
}
g.Expect(updatedStatus).To(gomega.Equal(newStatus), "updated status should be equal to desired status")
return nil
}, 5*time.Second).Should(gomega.Succeed(), "the status should be updated")
}
func waitForObjectAvailable(ctx context.Context, t testing.TB, c client.Client, obj client.Object) *unstructured.Unstructured {
unstructuredObj := &unstructured.Unstructured{}
for {
unstructuredObj.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind())
if err := c.Get(ctx, types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, unstructuredObj); err == nil {
break
} else if !apierrors.IsNotFound(err) {
t.Fatal(err)
}
}
return unstructuredObj
}
func isNamespace(obj client.Object) bool {
_, isNamespaceStruct := obj.(*corev1.Namespace)
return isNamespaceStruct ||
obj.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("Namespace").GroupKind()
}
func copyObject(t testing.TB, obj client.Object) client.Object {
copyRuntimeObj := obj.DeepCopyObject()
copyObj, ok := copyRuntimeObj.(client.Object)
if !ok {
t.Fatal("Unexpected error converting back to client.Object after deep copy")
}
return copyObj
}
// APIExpecter is a helper to define eventual expectations over API resources in tests.
// It's useful when working with clients that maintain a cache, since changes might not be
// reflected immediately, causing tests to flake.
type APIExpecter struct {
t testing.TB
client client.Client
g gomega.Gomega
timeout time.Duration
}
// NewAPIExpecter constructs a new APIExpecter.
func NewAPIExpecter(t testing.TB, client client.Client) *APIExpecter {
return &APIExpecter{
t: t,
g: gomega.NewWithT(t),
client: client,
timeout: 5 * time.Second,
}
}
// DeleteAndWait sends delete requests for a collection of objects and waits until
// the client cache reflects the changes.
func (a *APIExpecter) DeleteAndWait(ctx context.Context, objs ...client.Object) {
a.t.Helper()
for _, obj := range objs {
// namespaces can't be deleted with envtest
if isNamespace(obj) {
continue
}
err := a.client.Delete(ctx, obj)
if !apierrors.IsNotFound(err) {
a.g.Expect(err).To(gomega.Succeed(), "should delete object %s", obj.GetName())
}
a.ShouldEventuallyNotExist(ctx, obj)
}
}
// DeleteAllOfAndWait deletes all objects of the given type and waits until the client's
// cache reflects those changes.
func (a *APIExpecter) DeleteAllOfAndWait(ctx context.Context, obj client.Object) {
a.t.Helper()
a.g.Eventually(func() error {
err := a.client.DeleteAllOf(ctx, obj)
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return errors.New("some objects still existed before delete operation, try deleting another round")
}, a.timeout).Should(gomega.Succeed(), "all objects of kind %s should eventually be deleted", obj.GetObjectKind().GroupVersionKind().Kind)
}
// ShouldEventuallyExist defines an eventual expectation that succeeds if the provided object
// becomes readable by the client before the timeout expires.
func (a *APIExpecter) ShouldEventuallyExist(ctx context.Context, obj client.Object) {
a.t.Helper()
key := client.ObjectKeyFromObject(obj)
a.g.Eventually(func() error {
return a.client.Get(ctx, key, obj)
}, a.timeout).Should(gomega.Succeed(), "object %s should eventually exist", obj.GetName())
}
// ShouldEventuallyMatch defines an eventual expectation that succeeds if the provided object
// becomes readable by the client and matches the provider expectation before the timeout expires.
func (a *APIExpecter) ShouldEventuallyMatch(ctx context.Context, obj client.Object, match func(g gomega.Gomega)) {
a.t.Helper()
key := client.ObjectKeyFromObject(obj)
a.g.Eventually(func(g gomega.Gomega) error {
if err := a.client.Get(ctx, key, obj); err != nil {
return err
}
match(g)
return nil
}, a.timeout).Should(gomega.Succeed(), "object %s should eventually match", obj.GetName())
}
// CloneNameNamespace returns an empty client object of the same type
// with the same and namespace. This is a helper to pass a new object to the "Eventually"
// methods while preserving the original object's data.
func CloneNameNamespace[T any, PT interface {
*T
client.Object
}](obj PT,
) PT {
copyObj := PT(new(T))
copyObj.SetName(obj.GetName())
copyObj.SetNamespace(obj.GetNamespace())
return copyObj
}
// ShouldEventuallyNotExist defines an eventual expectation that succeeds if the provided object
// becomes not found by the client before the timeout expires.
func (a *APIExpecter) ShouldEventuallyNotExist(ctx context.Context, obj client.Object) {
key := client.ObjectKeyFromObject(obj)
a.g.Eventually(func() error {
err := a.client.Get(ctx, key, obj)
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return errors.New("object still exists")
}, a.timeout).Should(gomega.Succeed(), "object %s should eventually be deleted", obj.GetName())
}
| 275 |
eks-anywhere | aws | Go | package envtest_test
import (
"context"
"fmt"
"testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
type notFailT struct {
*testing.T
failed bool
panicMessage string
}
func (n *notFailT) Fatal(args ...interface{}) {
n.Logf("Expected failure: %s", fmt.Sprint(args...))
n.failed = true
panic(n.panicMessage)
}
func newNotFailT(t *testing.T) *notFailT {
return ¬FailT{
T: t,
panicMessage: "failed in notFailT",
}
}
func expectToFailTest(t *testing.T, f func(t testing.TB)) {
t.Helper()
testT := newNotFailT(t)
defer func() {
if r := recover(); r != nil {
if s, ok := r.(string); !ok || s != testT.panicMessage {
panic(r)
}
}
}()
f(testT)
t.Fatal("Expected to fail test but didn't")
}
func TestCreateObjs(t *testing.T) {
client := fake.NewClientBuilder().Build()
ctx := context.Background()
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: "s",
Namespace: "eksa-system",
},
}
cm := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cm",
Namespace: "eksa-system",
},
}
pod := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-pod",
Namespace: "eksa-system",
},
Status: appsv1.DeploymentStatus{
Replicas: 10,
},
}
envtest.CreateObjs(ctx, t, client, secret, cm, pod)
}
func TestCreateObjsErrorCreate(t *testing.T) {
client := fake.NewClientBuilder().Build()
ctx := context.Background()
secret := &corev1.Secret{}
expectToFailTest(t, func(tb testing.TB) {
envtest.CreateObjs(ctx, tb, client, secret)
})
}
| 99 |
eks-anywhere | aws | Go | package envtest
import (
"bufio"
"fmt"
"go/build"
"os"
"path/filepath"
"regexp"
"github.com/pkg/errors"
)
type moduleWithCRD struct {
pkg string
crdPaths []string
requireRegex *regexp.Regexp
replaceRegex *regexp.Regexp
}
func mustBuildModuleWithCRDs(p string, opts ...moduleOpt) moduleWithCRD {
pkgCRD, err := buildModuleWithCRD(p, opts...)
if err != nil {
panic(err)
}
return *pkgCRD
}
func withAdditionalCustomCRDPath(customCRDPath string) moduleOpt {
return func(m *moduleWithCRD) {
m.crdPaths = append(m.crdPaths, customCRDPath)
}
}
func withMainCustomCRDPath(customCRDPath string) moduleOpt {
return func(m *moduleWithCRD) {
m.crdPaths[0] = customCRDPath
}
}
type moduleOpt func(*moduleWithCRD)
func buildModuleWithCRD(pkg string, opts ...moduleOpt) (*moduleWithCRD, error) {
requireRegex, err := regexp.Compile(fmt.Sprintf("%s%s v(.+)", `^(\W)`, pkg))
if err != nil {
return nil, errors.Wrapf(err, "failed building regex for package with CRD")
}
replaceRegex, err := regexp.Compile(fmt.Sprintf("%s%s => (.+) v(.+)", `^(\W)`, pkg))
if err != nil {
return nil, errors.Wrapf(err, "failed building regex for package with CRD")
}
m := &moduleWithCRD{
pkg: pkg,
requireRegex: requireRegex,
replaceRegex: replaceRegex,
crdPaths: []string{"config/crd/bases"},
}
for _, opt := range opts {
opt(m)
}
return m, nil
}
type moduleInDisk struct {
moduleWithCRD
path, name, version string
}
func (m moduleInDisk) pathsToCRDs() []string {
paths := make([]string, 0, len(m.crdPaths))
for _, crdPath := range m.crdPaths {
paths = append(paths, pathToCRDs(m.path, m.name, m.version, crdPath))
}
return paths
}
func pathToCRDs(path, name, version, crdPath string) string {
gopath := envOrDefault("GOPATH", build.Default.GOPATH)
return filepath.Join(gopath, "pkg", "mod", path, fmt.Sprintf("%s@v%s", name, version), crdPath)
}
func getPathsToPackagesCRDs(rootFolder string, packages ...moduleWithCRD) ([]string, error) {
goModFile, err := os.Open(filepath.Join(rootFolder, "go.mod"))
if err != nil {
return nil, err
}
defer goModFile.Close()
modulesMappedToDisk := buildModulesMappedToDisk(packages)
scanner := bufio.NewScanner(goModFile)
for scanner.Scan() {
moduleLine := scanner.Text()
for _, p := range packages {
matches := p.requireRegex.FindStringSubmatch(moduleLine)
if len(matches) == 3 {
version := matches[2]
moduleInDisk := modulesMappedToDisk[p.pkg]
if moduleInDisk.version != "" {
// If the package has already been mapped to disk, it was
// probably by a replace, don't overwrite
continue
}
moduleInDisk.path = filepath.Dir(p.pkg)
moduleInDisk.name = filepath.Base(p.pkg)
moduleInDisk.version = version
continue
}
matches = p.replaceRegex.FindStringSubmatch(moduleLine)
if len(matches) == 4 {
replaceModule := matches[2]
replaceVersion := matches[3]
modulesMappedToDisk[p.pkg] = &moduleInDisk{
moduleWithCRD: p,
path: filepath.Dir(replaceModule),
name: filepath.Base(replaceModule),
version: replaceVersion,
}
}
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
paths := make([]string, 0, len(modulesMappedToDisk))
for _, m := range modulesMappedToDisk {
if m.version == "" {
return nil, fmt.Errorf("couldn't find module in disk for %s", m.pkg)
}
paths = append(paths, m.pathsToCRDs()...)
}
return paths, nil
}
func envOrDefault(envKey, defaultValue string) string {
if value, ok := os.LookupEnv(envKey); ok {
return value
}
return defaultValue
}
func buildModulesMappedToDisk(modules []moduleWithCRD) map[string]*moduleInDisk {
modulesMappedToDisk := make(map[string]*moduleInDisk, len(packages))
for _, m := range modules {
modulesMappedToDisk[m.pkg] = &moduleInDisk{
moduleWithCRD: m,
}
}
return modulesMappedToDisk
}
| 162 |
eks-anywhere | aws | Go | package envtest
import (
"testing"
. "github.com/onsi/gomega"
)
func TestModuleWithCRDRegexes(t *testing.T) {
requireDirective := ` sigs.k8s.io/cluster-api v1.0.2`
replaceDirective := ` sigs.k8s.io/cluster-api => github.com/mrajashree/cluster-api v1.0.3-0.20220301005127-382d70d4a76f`
g := NewWithT(t)
m, err := buildModuleWithCRD("sigs.k8s.io/cluster-api")
g.Expect(err).To(BeNil())
matchesRequire := m.requireRegex.FindStringSubmatch(requireDirective)
g.Expect(len(matchesRequire)).To(Equal(3))
g.Expect(matchesRequire[2]).To(Equal("1.0.2"))
matchesRequire = m.requireRegex.FindStringSubmatch(replaceDirective)
g.Expect(matchesRequire).To(BeNil())
matchesReplace := m.replaceRegex.FindStringSubmatch(replaceDirective)
g.Expect(len(matchesReplace)).To(Equal(4))
g.Expect(matchesReplace[2]).To(Equal("github.com/mrajashree/cluster-api"))
g.Expect(matchesReplace[3]).To(Equal("1.0.3-0.20220301005127-382d70d4a76f"))
matchesReplace = m.replaceRegex.FindStringSubmatch(requireDirective)
g.Expect(matchesReplace).To(BeNil())
}
| 31 |
eks-anywhere | aws | Go | package envtest
import (
"context"
"fmt"
"path"
"path/filepath"
goruntime "runtime"
"strings"
"testing"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/manager"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
capiPackage = "sigs.k8s.io/cluster-api"
capdPackage = "sigs.k8s.io/cluster-api/test"
capvPackage = "sigs.k8s.io/cluster-api-provider-vsphere"
captPackage = "github.com/tinkerbell/cluster-api-provider-tinkerbell"
tinkerbellPackage = "github.com/tinkerbell/tink"
etcdProviderPackage = "github.com/aws/etcdadm-controller"
rufioPackage = "github.com/tinkerbell/rufio"
capcPackage = "sigs.k8s.io/cluster-api-provider-cloudstack"
)
func init() {
// Register CRDs in Scheme in init so fake clients benefit from it
utilruntime.Must(corev1.AddToScheme(scheme.Scheme))
utilruntime.Must(releasev1.AddToScheme(scheme.Scheme))
utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
utilruntime.Must(clusterctlv1.AddToScheme(scheme.Scheme))
utilruntime.Must(controlplanev1.AddToScheme(scheme.Scheme))
utilruntime.Must(bootstrapv1.AddToScheme(scheme.Scheme))
utilruntime.Must(vspherev1.AddToScheme(scheme.Scheme))
utilruntime.Must(dockerv1.AddToScheme(scheme.Scheme))
utilruntime.Must(cloudstackv1.AddToScheme(scheme.Scheme))
utilruntime.Must(etcdv1.AddToScheme(scheme.Scheme))
utilruntime.Must(admissionv1beta1.AddToScheme(scheme.Scheme))
utilruntime.Must(anywherev1.AddToScheme(scheme.Scheme))
utilruntime.Must(eksdv1alpha1.AddToScheme(scheme.Scheme))
utilruntime.Must(snowv1.AddToScheme(scheme.Scheme))
utilruntime.Must(addonsv1.AddToScheme(scheme.Scheme))
utilruntime.Must(tinkerbellv1.AddToScheme(scheme.Scheme))
utilruntime.Must(tinkv1alpha1.AddToScheme(scheme.Scheme))
utilruntime.Must(rufiov1alpha1.AddToScheme(scheme.Scheme))
}
var packages = []moduleWithCRD{
mustBuildModuleWithCRDs(capiPackage,
withAdditionalCustomCRDPath("bootstrap/kubeadm/config/crd/bases"),
withAdditionalCustomCRDPath("controlplane/kubeadm/config/crd/bases"),
),
mustBuildModuleWithCRDs(captPackage,
withMainCustomCRDPath("config/crd/bases/infrastructure.cluster.x-k8s.io_tinkerbellclusters.yaml"),
withAdditionalCustomCRDPath("config/crd/bases/infrastructure.cluster.x-k8s.io_tinkerbellmachinetemplates.yaml")),
mustBuildModuleWithCRDs(tinkerbellPackage),
mustBuildModuleWithCRDs(capvPackage),
mustBuildModuleWithCRDs(capdPackage,
withMainCustomCRDPath("infrastructure/docker/config/crd/bases"),
),
mustBuildModuleWithCRDs(etcdProviderPackage),
mustBuildModuleWithCRDs(rufioPackage),
mustBuildModuleWithCRDs(capcPackage),
}
type Environment struct {
scheme *runtime.Scheme
client client.Client
env *envtest.Environment
manager manager.Manager
// apiReader is a non cached client (only for reads), helpful when testing the actual state of objects
apiReader client.Reader
cancelF context.CancelFunc
}
type EnvironmentOpt func(ctx context.Context, e *Environment)
func WithAssignment(envRef **Environment) EnvironmentOpt {
return func(ctx context.Context, e *Environment) {
*envRef = e
}
}
// RunWithEnvironment runs a suite of tests with an envtest that is shared across all tests
// We use testing.M as the input to avoid having this called directly from a test
// This ensures the envtest setup is always run from a TestMain.
func RunWithEnvironment(m *testing.M, opts ...EnvironmentOpt) int {
ctx := ctrl.SetupSignalHandler()
env, err := newEnvironment(ctx)
if err != nil {
fmt.Printf("Failed setting up envtest: %s\n", err)
return 1
}
for _, o := range opts {
o(ctx, env)
}
returnCode := m.Run()
if err = env.stop(); err != nil {
fmt.Printf("Failed stopping envtest: %s", err)
return 1
}
return returnCode
}
func newEnvironment(ctx context.Context) (*Environment, error) {
root := getRootPath()
currentDir := currentDir()
crdDirectoryPaths := make([]string, 0, len(packages)+2)
crdDirectoryPaths = append(crdDirectoryPaths,
filepath.Join(root, "config", "crd", "bases"),
filepath.Join(currentDir, "config", "eks-d-crds.yaml"),
filepath.Join(currentDir, "config", "snow-crds.yaml"),
)
extraCRDPaths, err := getPathsToPackagesCRDs(root, packages...)
if err != nil {
return nil, err
}
crdDirectoryPaths = append(crdDirectoryPaths, extraCRDPaths...)
testEnv := &envtest.Environment{
CRDDirectoryPaths: crdDirectoryPaths,
ErrorIfCRDPathMissing: true,
}
scheme := scheme.Scheme
ctx, cancel := context.WithCancel(ctx)
env := &Environment{
env: testEnv,
scheme: scheme,
cancelF: cancel,
}
cfg, err := testEnv.Start()
if err != nil {
return nil, err
}
// start webhook server using Manager
webhookInstallOptions := &testEnv.WebhookInstallOptions
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
Host: webhookInstallOptions.LocalServingHost,
Port: webhookInstallOptions.LocalServingPort,
CertDir: webhookInstallOptions.LocalServingCertDir,
LeaderElection: false,
MetricsBindAddress: "0",
})
if err != nil {
return nil, err
}
env.manager = mgr
go func() {
err = mgr.Start(ctx)
}()
<-mgr.Elected()
if err != nil {
return nil, err
}
env.client = mgr.GetClient()
env.apiReader = mgr.GetAPIReader()
return env, nil
}
func (e *Environment) stop() error {
fmt.Println("Stopping the test environment")
e.cancelF() // Cancels context that will stop the manager
return e.env.Stop()
}
func (e *Environment) Client() client.Client {
return e.client
}
// APIReader returns a non cached reader client.
func (e *Environment) APIReader() client.Reader {
return e.apiReader
}
// Manager returns a Manager for the test environment.
func (e *Environment) Manager() manager.Manager {
return e.manager
}
func (e *Environment) CreateNamespaceForTest(ctx context.Context, t *testing.T) string {
t.Helper()
name := strings.ReplaceAll(t.Name(), "/", "-")
name = strings.ReplaceAll(name, "_", "-")
name = strings.ToLower(name)
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
if err := e.client.Create(ctx, namespace); err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
if err := e.client.Delete(ctx, namespace); err != nil && !apierrors.IsNotFound(err) {
t.Fatal(err)
}
})
return namespace.Name
}
func getRootPath() string {
return path.Join(currentDir(), "..", "..", "..")
}
func currentDir() string {
_, currentFilePath, _, _ := goruntime.Caller(0)
return path.Dir(currentFilePath)
}
| 250 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: internal/test/reader.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockReader is a mock of Reader interface.
type MockReader struct {
ctrl *gomock.Controller
recorder *MockReaderMockRecorder
}
// MockReaderMockRecorder is the mock recorder for MockReader.
type MockReaderMockRecorder struct {
mock *MockReader
}
// NewMockReader creates a new mock instance.
func NewMockReader(ctrl *gomock.Controller) *MockReader {
mock := &MockReader{ctrl: ctrl}
mock.recorder = &MockReaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockReader) EXPECT() *MockReaderMockRecorder {
return m.recorder
}
// ReadFile mocks base method.
func (m *MockReader) ReadFile(url string) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadFile", url)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReadFile indicates an expected call of ReadFile.
func (mr *MockReaderMockRecorder) ReadFile(url interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFile", reflect.TypeOf((*MockReader)(nil).ReadFile), url)
}
| 50 |
eks-anywhere | aws | Go | package main
import (
"context"
"flag"
"os"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/go-logr/logr"
nutanixv1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
"github.com/spf13/pflag"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
logsv1 "k8s.io/component-base/logs/api/v1"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/klog/v2"
cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"github.com/aws/eks-anywhere/controllers"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/features"
snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
var scheme = runtime.NewScheme()
const WEBHOOK = "webhook"
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(anywherev1.AddToScheme(scheme))
utilruntime.Must(releasev1.AddToScheme(scheme))
utilruntime.Must(clusterv1.AddToScheme(scheme))
utilruntime.Must(clusterctlv1.AddToScheme(scheme))
utilruntime.Must(controlplanev1.AddToScheme(scheme))
utilruntime.Must(vspherev1.AddToScheme(scheme))
utilruntime.Must(cloudstackv1.AddToScheme(scheme))
utilruntime.Must(dockerv1.AddToScheme(scheme))
utilruntime.Must(etcdv1.AddToScheme(scheme))
utilruntime.Must(kubeadmv1.AddToScheme(scheme))
utilruntime.Must(eksdv1alpha1.AddToScheme(scheme))
utilruntime.Must(snowv1.AddToScheme(scheme))
utilruntime.Must(addonsv1.AddToScheme(scheme))
utilruntime.Must(tinkerbellv1.AddToScheme(scheme))
utilruntime.Must(tinkv1alpha1.AddToScheme(scheme))
utilruntime.Must(rufiov1alpha1.AddToScheme(scheme))
utilruntime.Must(nutanixv1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
type config struct {
metricsAddr string
enableLeaderElection bool
probeAddr string
gates []string
logging *logsv1.LoggingConfiguration
}
func newConfig() *config {
c := &config{
logging: logsv1.NewLoggingConfiguration(),
}
c.logging.Format = logsv1.JSONLogFormat
c.logging.Verbosity = logsv1.VerbosityLevel(0)
return c
}
func initFlags(fs *pflag.FlagSet, config *config) {
logsv1.AddFlags(config.logging, fs)
fs.StringVar(&config.metricsAddr, "metrics-bind-address", "localhost:8080", "The address the metric endpoint binds to.")
fs.StringVar(&config.probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
fs.BoolVar(&config.enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
fs.StringSliceVar(&config.gates, "feature-gates", []string{}, "A set of key=value pairs that describe feature gates for alpha/experimental features. ")
}
func main() {
config := newConfig()
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
initFlags(pflag.CommandLine, config)
pflag.Parse()
// Temporary logger for initialization
setupLog := ctrl.Log.WithName("setup")
if err := logsv1.ValidateAndApply(config.logging, nil); err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// klog.Background will automatically use the right logger.
ctrl.SetLogger(klog.Background())
// Once controller-runtime logger has been setup correctly, retrieve again
setupLog = ctrl.Log.WithName("setup")
features.FeedGates(config.gates)
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: config.metricsAddr,
Port: 9443,
HealthProbeBindAddress: config.probeAddr,
LeaderElection: config.enableLeaderElection,
LeaderElectionID: "f64ae69e.eks.amazonaws.com",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// Setup the context that's going to be used in controllers and for the manager.
ctx := ctrl.SetupSignalHandler()
closer := setupReconcilers(ctx, setupLog, mgr)
defer func() {
setupLog.Info("Closing reconciler dependencies")
if err := closer.Close(ctx); err != nil {
setupLog.Error(err, "Failed closing reconciler dependencies")
}
}()
setupWebhooks(setupLog, mgr)
setupChecks(setupLog, mgr)
//+kubebuilder:scaffold:builder
setupLog.Info("Starting manager")
if err := mgr.Start(ctx); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
type closable interface {
Close(ctx context.Context) error
}
func setupReconcilers(ctx context.Context, setupLog logr.Logger, mgr ctrl.Manager) closable {
setupLog.Info("Reading CAPI providers")
providers, err := clusterapi.GetProviders(ctx, mgr.GetAPIReader())
if err != nil {
setupLog.Error(err, "unable to read installed providers")
os.Exit(1)
}
expUpgrades := features.IsActive(features.ExperimentalSelfManagedClusterUpgrade())
if expUpgrades {
setupLog.Info("[EXPERIMENTAL] Self-managed cluster upgrades enabled. Proceed with caution, this is not intended for production scenarios.")
}
factory := controllers.NewFactory(ctrl.Log, mgr).
WithClusterReconciler(
providers, controllers.WithExperimentalSelfManagedClusterUpgrades(expUpgrades),
).
WithVSphereDatacenterReconciler().
WithSnowMachineConfigReconciler().
WithNutanixDatacenterReconciler().
WithCloudStackDatacenterReconciler()
reconcilers, err := factory.Build(ctx)
if err != nil {
setupLog.Error(err, "unable to build reconcilers")
os.Exit(1)
}
failed := false
setupLog.Info("Setting up cluster controller")
if err := (reconcilers.ClusterReconciler).SetupWithManager(mgr, setupLog); err != nil {
setupLog.Error(err, "unable to create controller", "controller", anywherev1.ClusterKind)
failed = true
}
setupLog.Info("Setting up vspheredatacenter controller")
if err := (reconcilers.VSphereDatacenterReconciler).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", anywherev1.VSphereDatacenterKind)
failed = true
}
setupLog.Info("Setting up snowmachineconfig controller")
if err := (reconcilers.SnowMachineConfigReconciler).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", anywherev1.SnowMachineConfigKind)
failed = true
}
setupLog.Info("Setting up nutanixdatacenter controller")
if err := (reconcilers.NutanixDatacenterReconciler).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", anywherev1.NutanixDatacenterKind)
failed = true
}
setupLog.Info("Setting up cloudstackdatacenter controller")
if err := (reconcilers.CloudStackDatacenterReconciler).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", anywherev1.CloudStackDatacenterKind)
failed = true
}
if failed {
if err := factory.Close(ctx); err != nil {
setupLog.Error(err, "Failed closing controller factory")
}
os.Exit(1)
}
return factory
}
func setupWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
setupCoreWebhooks(setupLog, mgr)
setupVSphereWebhooks(setupLog, mgr)
setupCloudstackWebhooks(setupLog, mgr)
setupSnowWebhooks(setupLog, mgr)
setupTinkerbellWebhooks(setupLog, mgr)
setupNutanixWebhooks(setupLog, mgr)
}
func setupCoreWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := (&anywherev1.Cluster{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.ClusterKind)
os.Exit(1)
}
if err := (&anywherev1.GitOpsConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.GitOpsConfigKind)
os.Exit(1)
}
if err := (&anywherev1.FluxConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.FluxConfigKind)
os.Exit(1)
}
if err := (&anywherev1.OIDCConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.OIDCConfigKind)
os.Exit(1)
}
if err := (&anywherev1.AWSIamConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.AWSIamConfigKind)
os.Exit(1)
}
}
func setupVSphereWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := (&anywherev1.VSphereDatacenterConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.VSphereDatacenterKind)
os.Exit(1)
}
if err := (&anywherev1.VSphereMachineConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.VSphereMachineConfigKind)
os.Exit(1)
}
}
func setupCloudstackWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := (&anywherev1.CloudStackDatacenterConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.CloudStackDatacenterKind)
os.Exit(1)
}
if err := (&anywherev1.CloudStackMachineConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.CloudStackMachineConfigKind)
os.Exit(1)
}
}
func setupSnowWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := (&anywherev1.SnowMachineConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.SnowMachineConfigKind)
os.Exit(1)
}
if err := (&anywherev1.SnowDatacenterConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "SnowDatacenterConfig")
os.Exit(1)
}
if err := (&anywherev1.SnowIPPool{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "SnowIPPool")
os.Exit(1)
}
}
func setupTinkerbellWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := (&anywherev1.TinkerbellDatacenterConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.TinkerbellDatacenterKind)
os.Exit(1)
}
if err := (&anywherev1.TinkerbellMachineConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.TinkerbellMachineConfigKind)
os.Exit(1)
}
}
func setupNutanixWebhooks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := (&anywherev1.NutanixDatacenterConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.NutanixDatacenterKind)
os.Exit(1)
}
if err := (&anywherev1.NutanixMachineConfig{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", WEBHOOK, anywherev1.NutanixMachineConfigKind)
os.Exit(1)
}
}
func setupChecks(setupLog logr.Logger, mgr ctrl.Manager) {
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
}
| 326 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const AWSDatacenterKind = "AWSDatacenterConfig"
// Used for generating yaml for generate clusterconfig command.
func NewAWSDatacenterConfigGenerate(clusterName string) *AWSDatacenterConfigGenerate {
return &AWSDatacenterConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: AWSDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: AWSDatacenterConfigSpec{},
}
}
func (c *AWSDatacenterConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *AWSDatacenterConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *AWSDatacenterConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
func GetAWSDatacenterConfig(fileName string) (*AWSDatacenterConfig, error) {
var clusterConfig AWSDatacenterConfig
err := ParseClusterConfig(fileName, &clusterConfig)
if err != nil {
return nil, err
}
return &clusterConfig, nil
}
| 43 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestGetAWSDatacenterConfig(t *testing.T) {
type args struct {
fileName string
}
tests := []struct {
name string
args args
want *v1alpha1.AWSDatacenterConfig
wantErr bool
}{
{
name: "Good AWS cluster config parse",
args: args{
fileName: "testdata/cluster_aws.yaml",
},
wantErr: false,
want: &v1alpha1.AWSDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.AWSDatacenterKind,
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: v1alpha1.AWSDatacenterConfigSpec{
AmiID: "my-image",
Region: "us-west",
},
},
},
{
name: "Non existent AWS file",
args: args{
fileName: "testdata/cluster_nonexistent.yaml",
},
wantErr: true,
want: nil,
},
{
name: "Bad AWS cluster config",
args: args{
fileName: "testdata/cluster_vsphere.yaml",
},
wantErr: true,
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := v1alpha1.GetAWSDatacenterConfig(tt.args.fileName)
if (err != nil) != tt.wantErr {
t.Errorf("GetAWSDatacenterConfig() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetAWSDatacenterConfig() got = %v, want %v", got, tt.want)
}
})
}
}
| 72 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// AWSDatacenterConfigSpec defines the desired state of AWSDatacenterConfig.
type AWSDatacenterConfigSpec struct {
// Important: Run "make generate" to regenerate code after modifying this file
Region string `json:"region"`
AmiID string `json:"amiID"`
}
// AWSDatacenterConfigStatus defines the observed state of AWSDatacenterConfig.
type AWSDatacenterConfigStatus struct { // Important: Run "make generate" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// AWSDatacenterConfig is the Schema for the AWSDatacenterConfigs API.
type AWSDatacenterConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSDatacenterConfigSpec `json:"spec,omitempty"`
Status AWSDatacenterConfigStatus `json:"status,omitempty"`
}
func (a *AWSDatacenterConfig) Kind() string {
return a.TypeMeta.Kind
}
func (a *AWSDatacenterConfig) ExpectedKind() string {
return AWSDatacenterKind
}
func (a *AWSDatacenterConfig) PauseReconcile() {
if a.Annotations == nil {
a.Annotations = map[string]string{}
}
a.Annotations[pausedAnnotation] = "true"
}
func (a *AWSDatacenterConfig) ClearPauseAnnotation() {
if a.Annotations != nil {
delete(a.Annotations, pausedAnnotation)
}
}
func (a *AWSDatacenterConfig) ConvertConfigToConfigGenerateStruct() *AWSDatacenterConfigGenerate {
namespace := defaultEksaNamespace
if a.Namespace != "" {
namespace = a.Namespace
}
config := &AWSDatacenterConfigGenerate{
TypeMeta: a.TypeMeta,
ObjectMeta: ObjectMeta{
Name: a.Name,
Annotations: a.Annotations,
Namespace: namespace,
},
Spec: a.Spec,
}
return config
}
// +kubebuilder:object:generate=false
// Same as AWSDatacenterConfig except stripped down for generation of yaml file during generate clusterconfig.
type AWSDatacenterConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec AWSDatacenterConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// AWSDatacenterConfigList contains a list of AWSDatacenterConfig.
type AWSDatacenterConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSDatacenterConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSDatacenterConfig{}, &AWSDatacenterConfigList{})
}
| 94 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
AWSIamConfigKind = "AWSIamConfig"
eksConfigMap = "EKSConfigMap"
mountedFile = "MountedFile"
DefaultAWSIamConfigPartition = "aws"
)
func GetAndValidateAWSIamConfig(fileName string, refName string, clusterConfig *Cluster) (*AWSIamConfig, error) {
config, err := getAWSIamConfig(fileName)
if err != nil {
return nil, err
}
config.SetDefaults()
if err = validateAWSIamConfig(config); err != nil {
return nil, err
}
if err = validateAWSIamRefName(config, refName); err != nil {
return nil, err
}
if err = validateAWSIamNamespace(config, clusterConfig); err != nil {
return nil, err
}
return config, nil
}
func getAWSIamConfig(fileName string) (*AWSIamConfig, error) {
var config AWSIamConfig
err := ParseClusterConfig(fileName, &config)
if err != nil {
return nil, err
}
// If the name is empty, we can assume that they didn't configure their AWS IAM configuration, so return nil
if config.Name == "" {
return nil, nil
}
return &config, nil
}
func validateAWSIamConfig(config *AWSIamConfig) error {
if config == nil {
return nil
}
if config.Spec.AWSRegion == "" {
return fmt.Errorf("AWSIamConfig AWSRegion is a required field")
}
if len(config.Spec.BackendMode) == 0 {
return fmt.Errorf("AWSIamConfig BackendMode is a required field")
}
for _, backendMode := range config.Spec.BackendMode {
if backendMode == eksConfigMap && len(config.Spec.MapRoles) == 0 && len(config.Spec.MapUsers) == 0 {
logger.Info("Warning: AWS IAM Authenticator mapRoles and mapUsers specification is empty. Please be aware this will prevent aws-iam-authenticator from mapping IAM roles to users/groups on the cluster with backendMode EKSConfigMap")
}
if backendMode == mountedFile {
return fmt.Errorf("AWSIamConfig BackendMode does not support %s backend", mountedFile)
}
}
if err := validateMapRoles(config.Spec.MapRoles); err != nil {
return err
}
if err := validateMapUsers(config.Spec.MapUsers); err != nil {
return err
}
return nil
}
func validateMapRoles(mapRoles []MapRoles) error {
for _, role := range mapRoles {
if role.RoleARN == "" {
return fmt.Errorf("AWSIamConfig MapRoles RoleARN is required")
}
if role.Username == "" {
return fmt.Errorf("AWSIamConfig MapRoles Username is required")
}
}
return nil
}
func validateMapUsers(mapUsers []MapUsers) error {
for _, user := range mapUsers {
if user.UserARN == "" {
return fmt.Errorf("AWSIamConfig MapUsers UserARN is required")
}
if user.Username == "" {
return fmt.Errorf("AWSIamConfig MapUsers Username is required")
}
}
return nil
}
func validateAWSIamRefName(config *AWSIamConfig, refName string) error {
if config == nil {
return nil
}
if config.Name != refName {
return fmt.Errorf("AWSIamConfig retrieved with name %s does not match name (%s) specified in "+
"identityProviderRefs", config.Name, refName)
}
return nil
}
func validateAWSIamNamespace(config *AWSIamConfig, clusterConfig *Cluster) error {
if config == nil {
return nil
}
if config.Namespace != clusterConfig.Namespace {
return fmt.Errorf("AWSIamConfig and Cluster objects must have the same namespace specified")
}
return nil
}
func setDefaultAWSIamPartition(config *AWSIamConfig) {
if config.Spec.Partition == "" {
config.Spec.Partition = DefaultAWSIamConfigPartition
logger.V(1).Info("AWSIamConfig Partition is empty. Using default partition 'aws'")
}
}
| 134 |
eks-anywhere | aws | Go | package v1alpha1
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGetAndValidateAWSIamConfig(t *testing.T) {
c := &Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-cluster",
},
}
tests := []struct {
testName string
fileName string
refName string
wantAWSIamConfig *AWSIamConfig
wantErr bool
}{
{
testName: "file doesn't exist",
fileName: "testdata/fake_file.yaml",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "invalid ref name",
fileName: "testdata/cluster_1_21_awsiam_invalid_refname.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "invalid namespace",
fileName: "testdata/cluster_1_21_awsiam_invalid_namespace.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "invalid AWSIamConfig no aws region",
fileName: "testdata/cluster_1_21_awsiam_no_awsregion.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "invalid AWSIamConfig no aws region",
fileName: "testdata/cluster_1_21_awsiam_no_backendmode.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "invalid AWSIamConfig unsupported MountedFile",
fileName: "testdata/cluster_1_21_awsiam_unsupported_mountedfile.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "invalid AWSIamConfig no arn",
fileName: "testdata/cluster_1_21_awsiam_no_arn.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: nil,
wantErr: true,
},
{
testName: "valid AWSIamConfig no mapping eksconfigmap backend",
fileName: "testdata/cluster_1_21_awsiam_no_mapping_eksconfigmap.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: &AWSIamConfig{
TypeMeta: metav1.TypeMeta{
Kind: "AWSIamConfig",
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: AWSIamConfigSpec{
AWSRegion: "test-region",
BackendMode: []string{"EKSConfigMap"},
Partition: "aws",
},
},
wantErr: false,
},
{
testName: "valid AWSIamConfig",
fileName: "testdata/cluster_1_21_awsiam.yaml",
refName: "eksa-unit-test",
wantAWSIamConfig: &AWSIamConfig{
TypeMeta: metav1.TypeMeta{
Kind: "AWSIamConfig",
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: AWSIamConfigSpec{
AWSRegion: "test-region",
BackendMode: []string{"mode1", "mode2"},
MapRoles: []MapRoles{
{
RoleARN: "test-role-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
MapUsers: []MapUsers{
{
UserARN: "test-user-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
Partition: "aws",
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
got, err := GetAndValidateAWSIamConfig(tt.fileName, tt.refName, c)
if (err != nil) != tt.wantErr {
t.Fatalf("GetAndValidateAWSIamConfig() error = %v\nwantErr %v", err, tt.wantErr)
}
if !reflect.DeepEqual(got, tt.wantAWSIamConfig) {
t.Fatalf("GetAndValidateAWSIamConfig() = %v\nwant %v", got, tt.wantAWSIamConfig)
}
})
}
}
| 138 |
eks-anywhere | aws | Go | package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// AWSIamConfig defines configuration options for AWS IAM Authenticator server
// AWSIamConfigSpec defines the desired state of AWSIamConfig.
type AWSIamConfigSpec struct {
// AWSRegion defines a region in an AWS partition
AWSRegion string `json:"awsRegion"`
// BackendMode defines multiple backends for aws-iam-authenticator server
// The server searches for mappings in order
BackendMode []string `json:"backendMode"`
// +kubebuilder:validation:Optional
MapRoles []MapRoles `json:"mapRoles,omitempty"`
// +kubebuilder:validation:Optional
MapUsers []MapUsers `json:"mapUsers,omitempty"`
// Partition defines the AWS partition on which the IAM roles exist
// +kubebuilder:default:=aws
// +kubebuilder:validation:Optional
Partition string `json:"partition,omitempty"`
}
// MapRoles defines IAM role to a username and set of groups mapping using EKSConfigMap BackendMode.
type MapRoles struct {
RoleARN string `yaml:"rolearn" json:"roleARN"`
Username string `json:"username"`
Groups []string `json:"groups,omitempty"`
}
// MapUsers defines IAM role to a username and set of groups mapping using EKSConfigMap BackendMode.
type MapUsers struct {
UserARN string `yaml:"userarn" json:"userARN"`
Username string `json:"username"`
Groups []string `json:"groups,omitempty"`
}
func (e *AWSIamConfigSpec) Equal(n *AWSIamConfigSpec) bool {
if e == n {
return true
}
if e == nil || n == nil {
return false
}
if e.AWSRegion != n.AWSRegion {
return false
}
if e.Partition != n.Partition {
return false
}
return SliceEqual(e.BackendMode, n.BackendMode)
}
// AWSIamConfigStatus defines the observed state of AWSIamConfig.
type AWSIamConfigStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// AWSIamConfig is the Schema for the awsiamconfigs API.
type AWSIamConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSIamConfigSpec `json:"spec,omitempty"`
Status AWSIamConfigStatus `json:"status,omitempty"`
}
// +kubebuilder:object:generate=false
// Same as AWSIamConfig except stripped down for generation of yaml file while writing to github repo when flux is enabled.
type AWSIamConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec AWSIamConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// AWSIamConfigList contains a list of AWSIamConfig.
type AWSIamConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSIamConfig `json:"items"`
}
func (c *AWSIamConfig) Kind() string {
return c.TypeMeta.Kind
}
func (c *AWSIamConfig) ExpectedKind() string {
return AWSIamConfigKind
}
func (c *AWSIamConfig) ConvertConfigToConfigGenerateStruct() *AWSIamConfigGenerate {
namespace := defaultEksaNamespace
if c.Namespace != "" {
namespace = c.Namespace
}
config := &AWSIamConfigGenerate{
TypeMeta: c.TypeMeta,
ObjectMeta: ObjectMeta{
Name: c.Name,
Annotations: c.Annotations,
Namespace: namespace,
},
Spec: c.Spec,
}
return config
}
func (c *AWSIamConfig) Validate() error {
return validateAWSIamConfig(c)
}
func (c *AWSIamConfig) SetDefaults() {
setDefaultAWSIamPartition(c)
}
func init() {
SchemeBuilder.Register(&AWSIamConfig{}, &AWSIamConfigList{})
}
| 125 |
eks-anywhere | aws | Go | package v1alpha1
import "testing"
func TestEqual(t *testing.T) {
tests := []struct {
testName string
aiOld *AWSIamConfig
aiNew *AWSIamConfig
wantEqual bool
}{
{
testName: "region changed",
aiOld: &AWSIamConfig{
Spec: AWSIamConfigSpec{
AWSRegion: "oldRegion",
},
},
aiNew: &AWSIamConfig{
Spec: AWSIamConfigSpec{
AWSRegion: "newRegion",
},
},
wantEqual: false,
},
{
testName: "partition changed",
aiOld: &AWSIamConfig{
Spec: AWSIamConfigSpec{
Partition: "oldPartition",
},
},
aiNew: &AWSIamConfig{
Spec: AWSIamConfigSpec{
Partition: "newPartition",
},
},
wantEqual: false,
},
{
testName: "backendMode changed",
aiOld: &AWSIamConfig{
Spec: AWSIamConfigSpec{
BackendMode: []string{"mode1", "mode2"},
},
},
aiNew: &AWSIamConfig{
Spec: AWSIamConfigSpec{
BackendMode: []string{"mode1"},
},
},
wantEqual: false,
},
{
testName: "equal success",
aiOld: &AWSIamConfig{
Spec: AWSIamConfigSpec{},
},
aiNew: &AWSIamConfig{
Spec: AWSIamConfigSpec{},
},
wantEqual: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if ok := tt.aiNew.Spec.Equal(&tt.aiOld.Spec); ok != tt.wantEqual {
t.Fatalf("Equal() gotEqual = %t\nwantEqual %t", ok, tt.wantEqual)
}
})
}
}
| 73 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var awsiamconfiglog = logf.Log.WithName("awsiamconfig-resource")
func (r *AWSIamConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
//+kubebuilder:webhook:path=/mutate-anywhere-eks-amazonaws-com-v1alpha1-awsiamconfig,mutating=true,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=awsiamconfigs,verbs=create;update,versions=v1alpha1,name=mutation.awsiamconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Defaulter = &AWSIamConfig{}
// Default implements webhook.Defaulter so a webhook will be registered for the type.
func (r *AWSIamConfig) Default() {
awsiamconfiglog.Info("Setting up AWSIamConfig defaults for", "name", r.Name)
r.SetDefaults()
}
// change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-awsiamconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=awsiamconfigs,verbs=create;update,versions=v1alpha1,name=validation.awsiamconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &AWSIamConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *AWSIamConfig) ValidateCreate() error {
awsiamconfiglog.Info("validate create", "name", r.Name)
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *AWSIamConfig) ValidateUpdate(old runtime.Object) error {
awsiamconfiglog.Info("validate update", "name", r.Name)
oldAWSIamConfig, ok := old.(*AWSIamConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a AWSIamConfig but got a %T", old))
}
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableAWSIamFields(r, oldAWSIamConfig)...)
if err := r.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("AWSIamConfig"), r, err.Error()))
}
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(GroupVersion.WithKind(AWSIamConfigKind).GroupKind(), r.Name, allErrs)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *AWSIamConfig) ValidateDelete() error {
awsiamconfiglog.Info("validate delete", "name", r.Name)
return nil
}
func validateImmutableAWSIamFields(new, old *AWSIamConfig) field.ErrorList {
var allErrs field.ErrorList
if !new.Spec.Equal(&old.Spec) {
allErrs = append(
allErrs,
field.Forbidden(field.NewPath(AWSIamConfigKind), "config is immutable"),
)
}
return allErrs
}
| 87 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestValidateUpdateAWSIamConfigFail(t *testing.T) {
aiOld := awsIamConfig()
aiOld.Spec.BackendMode = []string{"mode1", "mode2"}
aiNew := aiOld.DeepCopy()
aiNew.Spec.BackendMode = []string{"mode1"}
g := NewWithT(t)
g.Expect(aiNew.ValidateUpdate(&aiOld)).To(MatchError(ContainSubstring("config is immutable")))
}
func TestValidateUpdateAWSIamConfigSuccess(t *testing.T) {
aiOld := awsIamConfig()
aiOld.Spec.MapRoles = []v1alpha1.MapRoles{}
aiNew := aiOld.DeepCopy()
aiNew.Spec.MapRoles = []v1alpha1.MapRoles{
{
RoleARN: "test-role-arn",
Username: "test-user",
Groups: []string{"group1", "group2"},
},
}
g := NewWithT(t)
g.Expect(aiNew.ValidateUpdate(&aiOld)).To(Succeed())
}
func TestValidateCreateAWSIamConfigSuccess(t *testing.T) {
aiNew := awsIamConfig()
g := NewWithT(t)
g.Expect(aiNew.ValidateCreate()).To(Succeed())
}
func TestValidateCreateAWSIamConfigFail(t *testing.T) {
aiNew := awsIamConfig()
aiNew.Spec.AWSRegion = ""
g := NewWithT(t)
g.Expect(aiNew.ValidateCreate()).To(MatchError(ContainSubstring("AWSRegion is a required field")))
}
func TestValidateUpdateAWSIamConfigFailCausedByMutableFieldChange(t *testing.T) {
aiOld := awsIamConfig()
aiOld.Spec.MapRoles = []v1alpha1.MapRoles{}
aiNew := aiOld.DeepCopy()
aiNew.Spec.MapRoles = []v1alpha1.MapRoles{
{
RoleARN: "test-role-arn",
Username: "",
Groups: []string{"group1", "group2"},
},
}
g := NewWithT(t)
g.Expect(aiNew.ValidateUpdate(&aiOld)).To(MatchError(ContainSubstring("MapRoles Username is required")))
}
func TestAWSIamConfigSetDefaults(t *testing.T) {
g := NewWithT(t)
sOld := awsIamConfig()
sOld.Default()
g.Expect(sOld.Spec.Partition).To(Equal(v1alpha1.DefaultAWSIamConfigPartition))
}
func awsIamConfig() v1alpha1.AWSIamConfig {
return v1alpha1.AWSIamConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 1)},
Spec: v1alpha1.AWSIamConfigSpec{
AWSRegion: "us-east-1",
BackendMode: []string{"mode1"},
},
Status: v1alpha1.AWSIamConfigStatus{},
}
}
| 89 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"net/url"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const CloudStackDatacenterKind = "CloudStackDatacenterConfig"
// Used for generating yaml for generate clusterconfig command.
func NewCloudStackDatacenterConfigGenerate(clusterName string) *CloudStackDatacenterConfigGenerate {
return &CloudStackDatacenterConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: CloudStackDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: CloudStackDatacenterConfigSpec{
AvailabilityZones: []CloudStackAvailabilityZone{
{
Name: "az-1",
Zone: CloudStackZone{
Network: CloudStackResourceIdentifier{},
},
CredentialsRef: "global",
Account: "admin",
Domain: "domain1",
},
},
},
}
}
func (c *CloudStackDatacenterConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *CloudStackDatacenterConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *CloudStackDatacenterConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
func GetCloudStackDatacenterConfig(fileName string) (*CloudStackDatacenterConfig, error) {
var clusterConfig CloudStackDatacenterConfig
err := ParseClusterConfig(fileName, &clusterConfig)
if err != nil {
return nil, err
}
return &clusterConfig, nil
}
// GetCloudStackManagementAPIEndpointHostname parses the CloudStackAvailabilityZone's ManagementApiEndpoint URL and returns the hostname.
func GetCloudStackManagementAPIEndpointHostname(az CloudStackAvailabilityZone) (string, error) {
return getHostnameFromURL(az.ManagementApiEndpoint)
}
func getHostnameFromURL(rawurl string) (string, error) {
url, err := url.Parse(rawurl)
if err != nil {
return "", fmt.Errorf("%s is not a valid url", rawurl)
}
return url.Hostname(), nil
}
| 71 |
eks-anywhere | aws | Go | package v1alpha1
import (
"reflect"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGetCloudStackDatacenterConfig(t *testing.T) {
tests := []struct {
testName string
fileName string
wantCloudStackDatacenter *CloudStackDatacenterConfig
wantErr bool
}{
{
testName: "file doesn't exist",
fileName: "testdata/fake_file.yaml",
wantCloudStackDatacenter: nil,
wantErr: true,
},
{
testName: "not parseable file",
fileName: "testdata/not_parseable_cluster_cloudstack.yaml",
wantCloudStackDatacenter: nil,
wantErr: true,
},
{
testName: "valid 1.19",
fileName: "testdata/cluster_1_19_cloudstack.yaml",
wantCloudStackDatacenter: &CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: CloudStackDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackDatacenterConfigSpec{
AvailabilityZones: []CloudStackAvailabilityZone{
{
Name: "default-az-0",
Domain: "domain1",
Account: "admin",
Zone: CloudStackZone{
Name: "zone1",
Network: CloudStackResourceIdentifier{
Name: "net1",
},
},
ManagementApiEndpoint: "https://127.0.0.1:8080/client/api",
},
},
},
},
wantErr: false,
},
{
testName: "valid 1.21",
fileName: "testdata/cluster_1_21_cloudstack.yaml",
wantCloudStackDatacenter: &CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: CloudStackDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackDatacenterConfigSpec{
AvailabilityZones: []CloudStackAvailabilityZone{{
Name: "default-az-0",
Domain: "domain1",
Account: "admin",
Zone: CloudStackZone{
Id: "zoneId",
Network: CloudStackResourceIdentifier{
Id: "netId",
},
},
ManagementApiEndpoint: "https://127.0.0.1:8080/client/api",
}},
},
},
wantErr: false,
},
{
testName: "valid with extra delimiters",
fileName: "testdata/cluster_extra_delimiters_cloudstack.yaml",
wantCloudStackDatacenter: &CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: CloudStackDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackDatacenterConfigSpec{
AvailabilityZones: []CloudStackAvailabilityZone{{
Name: "default-az-0",
Domain: "domain1",
Account: "admin",
Zone: CloudStackZone{
Name: "zone1",
Network: CloudStackResourceIdentifier{
Name: "net1",
},
},
ManagementApiEndpoint: "https://127.0.0.1:8080/client/api",
}},
},
},
wantErr: false,
},
{
testName: "valid 1.20",
fileName: "testdata/cluster_1_20_cloudstack.yaml",
wantCloudStackDatacenter: &CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: CloudStackDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackDatacenterConfigSpec{
AvailabilityZones: []CloudStackAvailabilityZone{{
Name: "default-az-0",
Domain: "domain1",
Account: "admin",
Zone: CloudStackZone{
Name: "zone1",
Network: CloudStackResourceIdentifier{
Name: "net1",
},
},
ManagementApiEndpoint: "https://127.0.0.1:8080/client/api",
}},
},
},
wantErr: false,
},
{
testName: "invalid kind",
fileName: "testdata/cluster_invalid_kinds.yaml",
wantCloudStackDatacenter: nil,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
got, err := GetCloudStackDatacenterConfig(tt.fileName)
if (err != nil) != tt.wantErr {
t.Fatalf("GetCloudStackDatacenterConfig() error = %v, wantErr %v", err, tt.wantErr)
}
if !reflect.DeepEqual(got, tt.wantCloudStackDatacenter) {
t.Fatalf("GetCloudStackDatacenterConfig() = %#v, want %#v", got, tt.wantCloudStackDatacenter)
}
})
}
}
var cloudStackDatacenterConfigSpec1 = &CloudStackDatacenterConfigSpec{
Domain: "domain1",
Account: "admin",
Zones: []CloudStackZone{
{
Name: "zone1",
Network: CloudStackResourceIdentifier{
Name: "net1",
},
},
},
ManagementApiEndpoint: "testEndpoint",
}
var cloudStackDatacenterConfigSpecAzs = &CloudStackDatacenterConfigSpec{
AvailabilityZones: []CloudStackAvailabilityZone{
{
Name: "default-az-0",
CredentialsRef: "global",
Zone: CloudStackZone{
Name: "zone1",
Network: CloudStackResourceIdentifier{
Name: "net1",
},
},
Account: "admin",
Domain: "domain1",
ManagementApiEndpoint: "testEndpoint",
},
},
}
func TestCloudStackDatacenterConfigSpecEqual(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpec1.DeepCopy()
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeTrue(), "deep copy CloudStackDatacenterConfigSpec showing as non-equal")
}
func TestCloudStackDatacenterConfigSpecNotEqualEndpoint(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpec1.DeepCopy()
cloudStackDatacenterConfigSpec2.ManagementApiEndpoint = "newEndpoint"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "ManagementApiEndpoint comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualDomain(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpec1.DeepCopy()
cloudStackDatacenterConfigSpec2.Domain = "newDomain"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "Domain comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAccount(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpec1.DeepCopy()
cloudStackDatacenterConfigSpec2.Account = "newAccount"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "Account comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualZonesNil(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpec1.DeepCopy()
cloudStackDatacenterConfigSpec2.Zones = nil
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "Zones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAvailabilityZonesNil(t *testing.T) {
g := NewWithT(t)
g.Expect(cloudStackDatacenterConfigSpecAzs.AvailabilityZones[0].Equal(nil)).To(BeFalse(), "Zones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAvailabilityZonesEmpty(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpecAzs.DeepCopy()
cloudStackDatacenterConfigSpec2.AvailabilityZones = []CloudStackAvailabilityZone{}
g.Expect(cloudStackDatacenterConfigSpecAzs.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "AvailabilityZones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAvailabilityZonesModified(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpecAzs.DeepCopy()
cloudStackDatacenterConfigSpec2.AvailabilityZones[0].Account = "differentAccount"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "AvailabilityZones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackAvailabilityZonesEqual(t *testing.T) {
g := NewWithT(t)
cloudStackAvailabilityZoneSpec2 := cloudStackDatacenterConfigSpecAzs.AvailabilityZones[0].DeepCopy()
g.Expect(cloudStackDatacenterConfigSpecAzs.AvailabilityZones[0].Equal(cloudStackAvailabilityZoneSpec2)).To(BeTrue(), "AvailabilityZones comparison in CloudStackAvailabilityZoneSpec not detected")
}
func TestCloudStackAvailabilityZonesSame(t *testing.T) {
g := NewWithT(t)
g.Expect(cloudStackDatacenterConfigSpecAzs.AvailabilityZones[0].Equal(&cloudStackDatacenterConfigSpecAzs.AvailabilityZones[0])).To(BeTrue(), "AvailabilityZones comparison in CloudStackAvailabilityZoneSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAvailabilityZonesManagementApiEndpoint(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpecAzs.DeepCopy()
cloudStackDatacenterConfigSpec2.AvailabilityZones[0].ManagementApiEndpoint = "fake-endpoint"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "AvailabilityZones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAvailabilityZonesAccount(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpecAzs.DeepCopy()
cloudStackDatacenterConfigSpec2.AvailabilityZones[0].Account = "fake-acc"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "AvailabilityZones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSpecNotEqualAvailabilityZonesDomain(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfigSpec2 := cloudStackDatacenterConfigSpecAzs.DeepCopy()
cloudStackDatacenterConfigSpec2.AvailabilityZones[0].Domain = "fake-domain"
g.Expect(cloudStackDatacenterConfigSpec1.Equal(cloudStackDatacenterConfigSpec2)).To(BeFalse(), "AvailabilityZones comparison in CloudStackDatacenterConfigSpec not detected")
}
func TestCloudStackDatacenterConfigSetDefaults(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfig := CloudStackDatacenterConfig{
Spec: *cloudStackDatacenterConfigSpec1.DeepCopy(),
}
cloudStackDatacenterConfig.SetDefaults()
g.Expect(cloudStackDatacenterConfig.Spec.Equal(cloudStackDatacenterConfigSpecAzs)).To(BeTrue(), "AvailabilityZones comparison in CloudStackDatacenterConfigSpec not equal")
g.Expect(len(cloudStackDatacenterConfigSpec1.Zones)).To(Equal(len(cloudStackDatacenterConfig.Spec.AvailabilityZones)), "AvailabilityZones count in CloudStackDatacenterConfigSpec not equal to zone count")
}
func TestCloudStackDatacenterConfigValidate(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
obj *CloudStackDatacenterConfig
wantErr string
}{
{
name: "valid spec",
obj: cloudStackDatacenterConfig(),
wantErr: "",
},
{
name: "invalid account",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.Account = "admin"
}),
wantErr: "account must be empty",
},
{
name: "invalid domain",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.Domain = "root"
}),
wantErr: "domain must be empty",
},
{
name: "invalid managment api endpoint",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.ManagementApiEndpoint = "http://192.168.1.141:8080/client"
}),
wantErr: "managementApiEndpoint must be empty",
},
{
name: "invalid zones",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.Zones = []CloudStackZone{
{
Name: "zone1",
Network: CloudStackResourceIdentifier{
Name: "net1",
},
},
}
}),
wantErr: "zones must be empty",
},
{
name: "invalid availability zone length",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.AvailabilityZones = []CloudStackAvailabilityZone{}
}),
wantErr: "availabilityZones must not be empty",
},
{
name: "invalid availability zone name",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.AvailabilityZones[0].Name = "_az-1"
}),
wantErr: "availabilityZone names must be a valid label value since it is used to label nodes",
},
{
name: "invalid availability zone no network",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.AvailabilityZones[0].Zone.Network.Name = ""
}),
wantErr: "zone network is not set or is empty",
},
{
name: "invalid availability zone bad management api endpoint",
obj: cloudStackDatacenterConfig(func(c *CloudStackDatacenterConfig) {
c.Spec.AvailabilityZones[0].ManagementApiEndpoint = ":1234.5234"
}),
wantErr: "checking management api endpoint: :1234.5234 is not a valid url",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.wantErr != "" {
err := tt.obj.Validate()
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
} else {
err := tt.obj.Validate()
g.Expect(err).To(BeNil())
}
})
}
}
func TestCloudStackDatacenterConfigValidateAfterSetDefaults(t *testing.T) {
g := NewWithT(t)
cloudStackDatacenterConfig := CloudStackDatacenterConfig{
Spec: *cloudStackDatacenterConfigSpec1.DeepCopy(),
}
cloudStackDatacenterConfig.SetDefaults()
err := cloudStackDatacenterConfig.Validate()
g.Expect(err).To(BeNil())
// Spec.AvailabilityZones validation #2 (Name uniqueness)
cloudStackDatacenterConfig.Spec.AvailabilityZones = append(cloudStackDatacenterConfig.Spec.AvailabilityZones, cloudStackDatacenterConfig.Spec.AvailabilityZones[0])
err = cloudStackDatacenterConfig.Validate()
g.Expect(err).NotTo(BeNil())
}
type cloudStackDatacenterConfigOpt func(c *CloudStackDatacenterConfig)
func cloudStackDatacenterConfig(opts ...cloudStackDatacenterConfigOpt) *CloudStackDatacenterConfig {
config := &CloudStackDatacenterConfig{
Spec: *cloudStackDatacenterConfigSpecAzs.DeepCopy(),
}
for _, opt := range opts {
opt(config)
}
return config
}
| 409 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"fmt"
"strings"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
const DefaultCloudStackAZPrefix = "default-az"
// CloudStackDatacenterConfigSpec defines the desired state of CloudStackDatacenterConfig.
type CloudStackDatacenterConfigSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Domain contains a grouping of accounts. Domains usually contain multiple accounts that have some logical relationship to each other and a set of delegated administrators with some authority over the domain and its subdomains
// This field is considered as a fully qualified domain name which is the same as the domain path without "ROOT/" prefix. For example, if "foo" is specified then a domain with "ROOT/foo" domain path is picked.
// The value "ROOT" is a special case that points to "the" ROOT domain of the CloudStack. That is, a domain with a path "ROOT/ROOT" is not allowed.
// +optional
// Deprecated: Please use AvailabilityZones instead
Domain string `json:"domain,omitempty"`
// Zones is a list of one or more zones that are managed by a single CloudStack management endpoint.
// +optional
// Deprecated: Please use AvailabilityZones instead
Zones []CloudStackZone `json:"zones,omitempty"`
// Account typically represents a customer of the service provider or a department in a large organization. Multiple users can exist in an account, and all CloudStack resources belong to an account. Accounts have users and users have credentials to operate on resources within that account. If an account name is provided, a domain must also be provided.
// +optional
// Deprecated: Please use AvailabilityZones instead
Account string `json:"account,omitempty"`
// CloudStack Management API endpoint's IP. It is added to VM's noproxy list
// +optional
// Deprecated: Please use AvailabilityZones instead
ManagementApiEndpoint string `json:"managementApiEndpoint,omitempty"`
// AvailabilityZones list of different partitions to distribute VMs across - corresponds to a list of CAPI failure domains
AvailabilityZones []CloudStackAvailabilityZone `json:"availabilityZones,omitempty"`
}
type CloudStackResourceIdentifier struct {
// Id of a resource in the CloudStack environment. Mutually exclusive with Name
// +optional
Id string `json:"id,omitempty"`
// Name of a resource in the CloudStack environment. Mutually exclusive with Id
// +optional
Name string `json:"name,omitempty"`
}
func (r *CloudStackResourceIdentifier) Equal(o *CloudStackResourceIdentifier) bool {
if r == o {
return true
}
if r == nil || o == nil {
return false
}
if r.Id != o.Id {
return false
}
return r.Id == "" && o.Id == "" && r.Name == o.Name
}
// CloudStackZone is an organizational construct typically used to represent a single datacenter, and all its physical and virtual resources exist inside that zone. It can either be specified as a UUID or name.
type CloudStackZone struct {
// Zone is the name or UUID of the CloudStack zone in which clusters should be created. Zones should be managed by a single CloudStack Management endpoint.
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
// Network is the name or UUID of the CloudStack network in which clusters should be created. It can either be an isolated or shared network. If it doesn’t already exist in CloudStack, it’ll automatically be created by CAPC as an isolated network. It can either be specified as a UUID or name
// In multiple-zones situation, only 'Shared' network is supported.
Network CloudStackResourceIdentifier `json:"network"`
}
// CloudStackAvailabilityZone maps to a CAPI failure domain to distribute machines across Cloudstack infrastructure.
type CloudStackAvailabilityZone struct {
// Name is used as a unique identifier for each availability zone
Name string `json:"name"`
// CredentialRef is used to reference a secret in the eksa-system namespace
CredentialsRef string `json:"credentialsRef"`
// Zone represents the properties of the CloudStack zone in which clusters should be created, like the network.
Zone CloudStackZone `json:"zone"`
// Domain contains a grouping of accounts. Domains usually contain multiple accounts that have some logical relationship to each other and a set of delegated administrators with some authority over the domain and its subdomains
// This field is considered as a fully qualified domain name which is the same as the domain path without "ROOT/" prefix. For example, if "foo" is specified then a domain with "ROOT/foo" domain path is picked.
// The value "ROOT" is a special case that points to "the" ROOT domain of the CloudStack. That is, a domain with a path "ROOT/ROOT" is not allowed.
Domain string `json:"domain"`
// Account typically represents a customer of the service provider or a department in a large organization. Multiple users can exist in an account, and all CloudStack resources belong to an account. Accounts have users and users have credentials to operate on resources within that account. If an account name is provided, a domain must also be provided.
Account string `json:"account,omitempty"`
// CloudStack Management API endpoint's IP. It is added to VM's noproxy list
ManagementApiEndpoint string `json:"managementApiEndpoint"`
}
// CloudStackDatacenterConfigStatus defines the observed state of CloudStackDatacenterConfig.
type CloudStackDatacenterConfigStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// SpecValid is set to true if cloudstackdatacenterconfig is validated.
SpecValid bool `json:"specValid,omitempty"`
// ObservedGeneration is the latest generation observed by the controller.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// FailureMessage indicates that there is a fatal problem reconciling the
// state, and will be set to a descriptive error message.
FailureMessage *string `json:"failureMessage,omitempty"`
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// CloudStackDatacenterConfig is the Schema for the cloudstackdatacenterconfigs API.
type CloudStackDatacenterConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CloudStackDatacenterConfigSpec `json:"spec,omitempty"`
Status CloudStackDatacenterConfigStatus `json:"status,omitempty"`
}
func (v *CloudStackDatacenterConfig) Kind() string {
return v.TypeMeta.Kind
}
func (v *CloudStackDatacenterConfig) ExpectedKind() string {
return CloudStackDatacenterKind
}
func (v *CloudStackDatacenterConfig) PauseReconcile() {
if v.Annotations == nil {
v.Annotations = map[string]string{}
}
v.Annotations[pausedAnnotation] = "true"
}
func (v *CloudStackDatacenterConfig) IsReconcilePaused() bool {
if s, ok := v.Annotations[pausedAnnotation]; ok {
return s == "true"
}
return false
}
func (v *CloudStackDatacenterConfig) ClearPauseAnnotation() {
if v.Annotations != nil {
delete(v.Annotations, pausedAnnotation)
}
}
func (v *CloudStackDatacenterConfig) ConvertConfigToConfigGenerateStruct() *CloudStackDatacenterConfigGenerate {
namespace := defaultEksaNamespace
if v.Namespace != "" {
namespace = v.Namespace
}
config := &CloudStackDatacenterConfigGenerate{
TypeMeta: v.TypeMeta,
ObjectMeta: ObjectMeta{
Name: v.Name,
Annotations: v.Annotations,
Namespace: namespace,
},
Spec: v.Spec,
}
return config
}
func (v *CloudStackDatacenterConfig) Marshallable() Marshallable {
return v.ConvertConfigToConfigGenerateStruct()
}
func (v *CloudStackDatacenterConfig) Validate() error {
if v.Spec.Account != "" {
return errors.New("account must be empty")
}
if v.Spec.Domain != "" {
return errors.New("domain must be empty")
}
if v.Spec.ManagementApiEndpoint != "" {
return errors.New("managementApiEndpoint must be empty")
}
if len(v.Spec.Zones) > 0 {
return errors.New("zones must be empty")
}
if len(v.Spec.AvailabilityZones) == 0 {
return errors.New("availabilityZones must not be empty")
}
azSet := make(map[string]bool)
for _, az := range v.Spec.AvailabilityZones {
errorMessages := validation.IsValidLabelValue(az.Name)
if len(errorMessages) > 0 {
return fmt.Errorf("availabilityZone names must be a valid label value since it is used to label nodes: %s",
strings.Join(errorMessages, ";"))
}
if exists := azSet[az.Name]; exists {
return fmt.Errorf("availabilityZone names must be unique. Duplicate name: %s", az.Name)
}
azSet[az.Name] = true
_, err := GetCloudStackManagementAPIEndpointHostname(az)
if err != nil {
return fmt.Errorf("checking management api endpoint: %v", err)
}
if len(az.Zone.Network.Id) == 0 && len(az.Zone.Network.Name) == 0 {
return fmt.Errorf("zone network is not set or is empty")
}
}
return nil
}
func (v *CloudStackDatacenterConfig) SetDefaults() {
if v.Spec.AvailabilityZones == nil || len(v.Spec.AvailabilityZones) == 0 {
v.Spec.AvailabilityZones = make([]CloudStackAvailabilityZone, 0, len(v.Spec.Zones))
for index, csZone := range v.Spec.Zones {
az := CloudStackAvailabilityZone{
Name: fmt.Sprintf("%s-%d", DefaultCloudStackAZPrefix, index),
Zone: csZone,
Account: v.Spec.Account,
Domain: v.Spec.Domain,
ManagementApiEndpoint: v.Spec.ManagementApiEndpoint,
CredentialsRef: "global",
}
v.Spec.AvailabilityZones = append(v.Spec.AvailabilityZones, az)
}
}
v.Spec.Zones = nil
v.Spec.Domain = ""
v.Spec.Account = ""
v.Spec.ManagementApiEndpoint = ""
}
func (s *CloudStackDatacenterConfigSpec) Equal(o *CloudStackDatacenterConfigSpec) bool {
if s == o {
return true
}
if s == nil || o == nil {
return false
}
if len(s.Zones) != len(o.Zones) {
return false
}
for i, z := range s.Zones {
if !z.Equal(&o.Zones[i]) {
return false
}
}
if len(s.AvailabilityZones) != len(o.AvailabilityZones) {
return false
}
oAzsMap := map[string]CloudStackAvailabilityZone{}
for _, oAz := range o.AvailabilityZones {
oAzsMap[oAz.Name] = oAz
}
for _, sAz := range s.AvailabilityZones {
oAz, found := oAzsMap[sAz.Name]
if !found || !sAz.Equal(&oAz) {
return false
}
}
return s.ManagementApiEndpoint == o.ManagementApiEndpoint &&
s.Domain == o.Domain &&
s.Account == o.Account
}
func (z *CloudStackZone) Equal(o *CloudStackZone) bool {
if z == o {
return true
}
if z == nil || o == nil {
return false
}
if z.Id == o.Id &&
z.Name == o.Name &&
z.Network.Id == o.Network.Id &&
z.Network.Name == o.Network.Name {
return true
}
return false
}
func (az *CloudStackAvailabilityZone) Equal(o *CloudStackAvailabilityZone) bool {
if az == o {
return true
}
if az == nil || o == nil {
return false
}
return az.Zone.Equal(&o.Zone) &&
az.Name == o.Name &&
az.CredentialsRef == o.CredentialsRef &&
az.Account == o.Account &&
az.Domain == o.Domain &&
az.ManagementApiEndpoint == o.ManagementApiEndpoint
}
// +kubebuilder:object:generate=false
// Same as CloudStackDatacenterConfig except stripped down for generation of yaml file during generate clusterconfig.
type CloudStackDatacenterConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec CloudStackDatacenterConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// CloudStackDatacenterConfigList contains a list of CloudStackDatacenterConfig.
type CloudStackDatacenterConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CloudStackDatacenterConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&CloudStackDatacenterConfig{}, &CloudStackDatacenterConfigList{})
}
| 330 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"fmt"
"regexp"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var cloudstackdatacenterconfiglog = logf.Log.WithName("cloudstackdatacenterconfig-resource")
func (r *CloudStackDatacenterConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
//+kubebuilder:webhook:path=/mutate-anywhere-eks-amazonaws-com-v1alpha1-cloudstackdatacenterconfig,mutating=true,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=cloudstackdatacenterconfigs,verbs=create;update,versions=v1alpha1,name=mutation.cloudstackdatacenterconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Defaulter = &CloudStackDatacenterConfig{}
// Default implements webhook.Defaulter so a webhook will be registered for the type.
func (r *CloudStackDatacenterConfig) Default() {
cloudstackdatacenterconfiglog.Info("Setting up CloudStackDatacenterConfig defaults for", "name", r.Name)
r.SetDefaults()
}
// change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-cloudstackdatacenterconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=cloudstackdatacenterconfigs,verbs=create;update,versions=v1alpha1,name=validation.cloudstackdatacenterconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &CloudStackDatacenterConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *CloudStackDatacenterConfig) ValidateCreate() error {
cloudstackdatacenterconfiglog.Info("validate create", "name", r.Name)
if r.IsReconcilePaused() {
cloudstackdatacenterconfiglog.Info("CloudStackDatacenterConfig is paused, so allowing create", "name", r.Name)
return nil
}
return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *CloudStackDatacenterConfig) ValidateUpdate(old runtime.Object) error {
cloudstackdatacenterconfiglog.Info("validate update", "name", r.Name)
oldDatacenterConfig, ok := old.(*CloudStackDatacenterConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a CloudStackDataCenterConfig but got a %T", old))
}
if oldDatacenterConfig.IsReconcilePaused() {
cloudstackdatacenterconfiglog.Info("Reconciliation is paused")
return nil
}
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableFieldsCloudStackCluster(r, oldDatacenterConfig)...)
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(GroupVersion.WithKind(CloudStackDatacenterKind).GroupKind(), r.Name, allErrs)
}
func isValidAzConversionName(uuid string) bool {
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}")
return r.MatchString(uuid)
}
func isCapcV1beta1ToV1beta2Upgrade(new, old *CloudStackDatacenterConfigSpec) bool {
if len(new.AvailabilityZones) != len(old.AvailabilityZones) {
return false
}
for _, az := range old.AvailabilityZones {
if !strings.HasPrefix(az.Name, DefaultCloudStackAZPrefix) {
return false
}
}
for _, az := range new.AvailabilityZones {
if !isValidAzConversionName(az.Name) {
return false
}
}
return true
}
func validateImmutableFieldsCloudStackCluster(new, old *CloudStackDatacenterConfig) field.ErrorList {
var allErrs field.ErrorList
specPath := field.NewPath("spec")
// Check for CAPC v1beta1 -> CAPC v1beta2 upgrade
if isCapcV1beta1ToV1beta2Upgrade(&new.Spec, &old.Spec) {
return allErrs
}
newAzMap := make(map[string]CloudStackAvailabilityZone)
for _, az := range new.Spec.AvailabilityZones {
newAzMap[az.Name] = az
}
atLeastOneAzOverlap := false
for _, oldAz := range old.Spec.AvailabilityZones {
if newAz, ok := newAzMap[oldAz.Name]; ok {
atLeastOneAzOverlap = true
if !newAz.Equal(&oldAz) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("availabilityZone", oldAz.Name), "availabilityZone is immutable"),
)
}
}
}
if !atLeastOneAzOverlap {
allErrs = append(
allErrs,
field.Invalid(field.NewPath("spec", "availabilityZone"), new.Spec.AvailabilityZones, "at least one AvailabilityZone must be shared between new and old CloudStackDatacenterConfig specs"),
)
}
return allErrs
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *CloudStackDatacenterConfig) ValidateDelete() error {
cloudstackdatacenterconfiglog.Info("validate delete", "name", r.Name)
// TODO(user): fill in your validation logic upon object deletion.
return nil
}
| 155 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestCloudStackDatacenterDatacenterConfigSetDefaults(t *testing.T) {
g := NewWithT(t)
originalDatacenter := cloudstackDatacenterConfig()
originalDatacenter.Spec.AvailabilityZones = nil
originalDatacenter.Spec.Domain = "domain"
originalDatacenter.Spec.Account = "admin"
originalDatacenter.Spec.ManagementApiEndpoint = "https://127.0.0.1:8080/client/api"
originalDatacenter.Spec.Zones = []v1alpha1.CloudStackZone{
{Name: "test_zone", Network: v1alpha1.CloudStackResourceIdentifier{Name: "test_zone"}},
}
expectedDatacenter := originalDatacenter.DeepCopy()
expectedDatacenter.Spec.AvailabilityZones = []v1alpha1.CloudStackAvailabilityZone{
{
Name: "default-az-0",
CredentialsRef: "global",
Zone: originalDatacenter.Spec.Zones[0],
Domain: originalDatacenter.Spec.Domain,
Account: originalDatacenter.Spec.Account,
ManagementApiEndpoint: originalDatacenter.Spec.ManagementApiEndpoint,
},
}
expectedDatacenter.Spec.Zones = nil
expectedDatacenter.Spec.Domain = ""
expectedDatacenter.Spec.Account = ""
expectedDatacenter.Spec.ManagementApiEndpoint = ""
originalDatacenter.Default()
g.Expect(originalDatacenter).To(Equal(*expectedDatacenter))
}
func TestCloudStackDatacenterValidateUpdateDomainImmutable(t *testing.T) {
vOld := cloudstackDatacenterConfig()
vOld.Spec.AvailabilityZones[0].Domain = "oldCruftyDomain"
c := vOld.DeepCopy()
c.Spec.AvailabilityZones[0].Domain = "shinyNewDomain"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateV1beta1ToV1beta2Upgrade(t *testing.T) {
vOld := cloudstackDatacenterConfig()
vOld.Spec.AvailabilityZones[0].Name = "default-az-0"
vNew := vOld.DeepCopy()
vNew.Spec.AvailabilityZones[0].Name = "12345678-abcd-4abc-abcd-abcd12345678"
g := NewWithT(t)
g.Expect(vNew.ValidateUpdate(&vOld)).To(Succeed())
}
func TestCloudStackDatacenterValidateUpdateV1beta1ToV1beta2UpgradeAddAzInvalid(t *testing.T) {
vOld := cloudstackDatacenterConfig()
vOld.Spec.AvailabilityZones[0].Name = "default-az-0"
vNew := vOld.DeepCopy()
vNew.Spec.AvailabilityZones[0].Name = "12345678-abcd-4abc-abcd-abcd12345678"
vNew.Spec.AvailabilityZones = append(vNew.Spec.AvailabilityZones, vNew.Spec.AvailabilityZones[0])
g := NewWithT(t)
g.Expect(vNew.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateRenameAzInvalid(t *testing.T) {
vOld := cloudstackDatacenterConfig()
vOld.Spec.AvailabilityZones[0].Name = "default-az-0"
vNew := vOld.DeepCopy()
vNew.Spec.AvailabilityZones[0].Name = "shinyNewAzName"
g := NewWithT(t)
g.Expect(vNew.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateManagementApiEndpointImmutable(t *testing.T) {
vOld := cloudstackDatacenterConfig()
vOld.Spec.AvailabilityZones[0].ManagementApiEndpoint = "oldCruftyManagementApiEndpoint"
c := vOld.DeepCopy()
c.Spec.AvailabilityZones[0].ManagementApiEndpoint = "shinyNewManagementApiEndpoint"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateZonesImmutable(t *testing.T) {
vOld := cloudstackDatacenterConfig()
c := vOld.DeepCopy()
c.Spec.AvailabilityZones[0].Zone.Name = "shinyNewZone"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateAccountImmutable(t *testing.T) {
vOld := cloudstackDatacenterConfig()
c := vOld.DeepCopy()
c.Spec.AvailabilityZones[0].Account = "shinyNewAccount"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateNetworkImmutable(t *testing.T) {
vOld := cloudstackDatacenterConfig()
c := vOld.DeepCopy()
c.Spec.AvailabilityZones[0].Zone.Network.Name = "GuestNet2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCloudStackDatacenterValidateUpdateWithPausedAnnotation(t *testing.T) {
vOld := cloudstackDatacenterConfig()
vOld.Spec.Zones = []v1alpha1.CloudStackZone{
{
Name: "oldCruftyZone",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "GuestNet1",
},
},
}
c := vOld.DeepCopy()
c.Spec.Zones = []v1alpha1.CloudStackZone{
{
Name: "oldCruftyZone",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "GuestNet2",
},
},
}
vOld.PauseReconcile()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestCloudStackDatacenterValidateUpdateInvalidType(t *testing.T) {
vOld := &v1alpha1.Cluster{}
c := &v1alpha1.CloudStackDatacenterConfig{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(vOld)).NotTo(Succeed())
}
func cloudstackDatacenterConfig() v1alpha1.CloudStackDatacenterConfig {
return v1alpha1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 1)},
Spec: v1alpha1.CloudStackDatacenterConfigSpec{
AvailabilityZones: []v1alpha1.CloudStackAvailabilityZone{
{
Name: "default-az-0",
Zone: v1alpha1.CloudStackZone{
Name: "oldCruftyZone",
Network: v1alpha1.CloudStackResourceIdentifier{
Name: "GuestNet1",
},
},
},
},
},
Status: v1alpha1.CloudStackDatacenterConfigStatus{},
}
}
| 178 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"os"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// DefaultCloudStackUser is the default CloudStackMachingConfig username.
const DefaultCloudStackUser = "capc"
// CloudStackMachineConfigKind is the kind value for a CloudStackMachineConfig.
const CloudStackMachineConfigKind = "CloudStackMachineConfig"
// Taken from https://github.com/shapeblue/cloudstack/blob/08bb4ad9fea7e422c3d3ac6d52f4670b1e89eed7/api/src/main/java/com/cloud/vm/VmDetailConstants.java
// These fields should be modeled separately in eks-a and not used by the additionalDetails cloudstack VM field.
var restrictedUserCustomDetails = [...]string{
"keyboard", "cpu.corespersocket", "rootdisksize", "boot.mode", "nameonhypervisor",
"nicAdapter", "rootDiskController", "dataDiskController", "svga.vramSize", "nestedVirtualizationFlag", "ramReservation",
"hypervisortoolsversion", "platform", "timeoffset", "kvm.vnc.port", "kvm.vnc.address", "video.hardware", "video.ram",
"smc.present", "firmware", "cpuNumber", "cpuSpeed", "memory", "cpuOvercommitRatio", "memoryOvercommitRatio",
"Message.ReservedCapacityFreed.Flag", "deployvm", "SSH.PublicKey", "SSH.KeyPairNames", "password", "Encrypted.Password",
"configDriveLocation", "nic", "network", "ip4Address", "ip6Address", "disk", "diskOffering", "configurationId",
"keypairnames", "controlNodeLoginUser",
}
// Used for generating yaml for generate clusterconfig command.
func NewCloudStackMachineConfigGenerate(name string) *CloudStackMachineConfigGenerate {
return &CloudStackMachineConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: name,
},
Spec: CloudStackMachineConfigSpec{
ComputeOffering: CloudStackResourceIdentifier{
Id: "",
},
Template: CloudStackResourceIdentifier{
Id: "",
},
Users: []UserConfiguration{{
Name: "capc",
SshAuthorizedKeys: []string{"ssh-rsa AAAA..."},
}},
},
}
}
func (c *CloudStackMachineConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *CloudStackMachineConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *CloudStackMachineConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
func GetCloudStackMachineConfigs(fileName string) (map[string]*CloudStackMachineConfig, error) {
configs := make(map[string]*CloudStackMachineConfig)
content, err := os.ReadFile(fileName)
if err != nil {
return nil, fmt.Errorf("unable to read file due to: %v", err)
}
for _, c := range strings.Split(string(content), YamlSeparator) {
var config CloudStackMachineConfig
if err = yaml.UnmarshalStrict([]byte(c), &config); err == nil {
if config.Kind == CloudStackMachineConfigKind {
configs[config.Name] = &config
continue
}
}
_ = yaml.Unmarshal([]byte(c), &config) // this is to check if there is a bad spec in the file
if config.Kind == CloudStackMachineConfigKind {
return nil, fmt.Errorf("unable to unmarshall content from file due to: %v", err)
}
}
if len(configs) == 0 {
return nil, fmt.Errorf("unable to find kind %v in file", CloudStackMachineConfigKind)
}
return configs, nil
}
func validateCloudStackMachineConfig(machineConfig *CloudStackMachineConfig) error {
if len(machineConfig.Spec.ComputeOffering.Id) == 0 && len(machineConfig.Spec.ComputeOffering.Name) == 0 {
return fmt.Errorf("computeOffering is not set for CloudStackMachineConfig %s. Default computeOffering is not supported in CloudStack, please provide a computeOffering name or ID", machineConfig.Name)
}
if len(machineConfig.Spec.Template.Id) == 0 && len(machineConfig.Spec.Template.Name) == 0 {
return fmt.Errorf("template is not set for CloudStackMachineConfig %s. Default template is not supported in CloudStack, please provide a template name or ID", machineConfig.Name)
}
if err, fieldName, fieldValue := machineConfig.Spec.DiskOffering.Validate(); err != nil {
return fmt.Errorf("machine config %s validation failed: %s: %s invalid, %v", machineConfig.Name, fieldName, fieldValue, err)
}
for _, restrictedKey := range restrictedUserCustomDetails {
if _, found := machineConfig.Spec.UserCustomDetails[restrictedKey]; found {
return fmt.Errorf("restricted key %s found in custom user details", restrictedKey)
}
}
if err := validateAffinityConfig(machineConfig); err != nil {
return err
}
return nil
}
func validateAffinityConfig(machineConfig *CloudStackMachineConfig) error {
if len(machineConfig.Spec.Affinity) > 0 && len(machineConfig.Spec.AffinityGroupIds) > 0 {
return fmt.Errorf("affinity and affinityGroupIds cannot be set at the same time for CloudStackMachineConfig %s. Please provide either one of them or none", machineConfig.Name)
}
if len(machineConfig.Spec.Affinity) > 0 {
if machineConfig.Spec.Affinity != "pro" && machineConfig.Spec.Affinity != "anti" && machineConfig.Spec.Affinity != "no" {
return fmt.Errorf("invalid affinity type %s for CloudStackMachineConfig %s. Please provide \"pro\", \"anti\" or \"no\"", machineConfig.Spec.Affinity, machineConfig.Name)
}
}
return nil
}
| 124 |
eks-anywhere | aws | Go | package v1alpha1
import (
"reflect"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var cloudStackMachineConfigSpec1 = &CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{
"foo": "bar",
},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
AffinityGroupIds: []string{"affinityGroupId1"},
}
func TestGetCloudStackMachineConfigs(t *testing.T) {
tests := []struct {
testName string
fileName string
wantCloudStackMachineConfigs map[string]*CloudStackMachineConfig
wantErr bool
}{
{
testName: "file doesn't exist",
fileName: "testdata/fake_file.yaml",
wantCloudStackMachineConfigs: nil,
wantErr: true,
},
{
testName: "not parseable file",
fileName: "testdata/not_parseable_cluster.yaml",
wantCloudStackMachineConfigs: nil,
wantErr: true,
},
{
testName: "valid 1.19",
fileName: "testdata/cluster_1_19_cloudstack.yaml",
wantCloudStackMachineConfigs: map[string]*CloudStackMachineConfig{
"eksa-unit-test": {
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "centos7-k8s-119",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "m4-large",
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
},
wantErr: false,
},
{
testName: "valid 1.21",
fileName: "testdata/cluster_1_21_cloudstack.yaml",
wantCloudStackMachineConfigs: map[string]*CloudStackMachineConfig{
"eksa-unit-test": {
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Id: "centos7-k8s-121-id",
},
ComputeOffering: CloudStackResourceIdentifier{
Id: "m4-large-id",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "Small",
},
MountPath: "/data-small",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
},
wantErr: false,
},
{
testName: "valid with extra delimiters",
fileName: "testdata/cluster_extra_delimiters_cloudstack.yaml",
wantCloudStackMachineConfigs: map[string]*CloudStackMachineConfig{
"eksa-unit-test": {
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "centos7-k8s-118",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "m4-large",
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
},
wantErr: false,
},
{
testName: "valid 1.20",
fileName: "testdata/cluster_1_20_cloudstack.yaml",
wantCloudStackMachineConfigs: map[string]*CloudStackMachineConfig{
"eksa-unit-test": {
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "centos7-k8s-120",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "m4-large",
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
},
wantErr: false,
},
{
testName: "valid different machine configs",
fileName: "testdata/cluster_different_machine_configs_cloudstack.yaml",
wantCloudStackMachineConfigs: map[string]*CloudStackMachineConfig{
"eksa-unit-test": {
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "centos7-k8s-118",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "m4-large",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "Small",
},
MountPath: "/data-small",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
"eksa-unit-test-2": {
TypeMeta: metav1.TypeMeta{
Kind: CloudStackMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-2",
},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "centos7-k8s-118",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "m5-xlarge",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "Medium",
},
MountPath: "/data-medium",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
},
wantErr: false,
},
{
testName: "invalid kind",
fileName: "testdata/cluster_invalid_kinds.yaml",
wantCloudStackMachineConfigs: nil,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
got, err := GetCloudStackMachineConfigs(tt.fileName)
if (err != nil) != tt.wantErr {
t.Fatalf("GetCloudStackMachineConfigs() error = %v, wantErr %v", err, tt.wantErr)
}
if !reflect.DeepEqual(got, tt.wantCloudStackMachineConfigs) {
t.Fatalf("GetCloudStackMachineConfigs() = %#v, want %#v", got, tt.wantCloudStackMachineConfigs)
}
})
}
}
func TestCloudStackMachineConfigValidate(t *testing.T) {
tests := []struct {
name string
obj *CloudStackMachineConfig
wantErr string
}{
{
name: "valid config",
obj: &CloudStackMachineConfig{
Spec: *cloudStackMachineConfigSpec1,
},
wantErr: "",
},
{
name: "disk offering empty",
obj: &CloudStackMachineConfig{
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{
"foo": "bar",
},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
AffinityGroupIds: []string{"affinityGroupId1"},
},
},
wantErr: "",
},
{
name: "invalid - bad mount path",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{
"foo": "bar",
},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
Affinity: "pro",
},
},
wantErr: "mountPath: / invalid, must be non-empty and start with /",
},
{
name: "invalid - empty device",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{
"foo": "bar",
},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
AffinityGroupIds: []string{"affinityGroupId1"},
},
},
wantErr: "device: invalid, empty device",
},
{
name: "invalid - empty filesystem",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{
"foo": "bar",
},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
Affinity: "pro",
},
},
wantErr: "filesystem: invalid, empty filesystem",
},
{
name: "invalid - empty label",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{
"foo": "bar",
},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
AffinityGroupIds: []string{"affinityGroupId1"},
},
},
wantErr: "label: invalid, empty label",
},
{
name: "invalid - restricted user details",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{"keyboard": "test"},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
AffinityGroupIds: []string{"affinityGroupId1"},
},
},
wantErr: "restricted key keyboard found in custom user details",
},
{
name: "bad affinity type",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{"foo": "bar"},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
Affinity: "xxx",
},
},
wantErr: "invalid affinity type xxx for CloudStackMachineConfig test",
},
{
name: "both affinity and affinityGroupIds are defined",
obj: &CloudStackMachineConfig{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: CloudStackMachineConfigSpec{
Template: CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: CloudStackResourceIdentifier{
Name: "offering1",
},
DiskOffering: &CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
},
Users: []UserConfiguration{
{
Name: "zone1",
SshAuthorizedKeys: []string{"key"},
},
},
UserCustomDetails: map[string]string{"foo": "bar"},
Symlinks: map[string]string{
"/var/log/kubernetes": "/data/var/log/kubernetes",
},
AffinityGroupIds: []string{"affinityGroupId1"},
Affinity: "pro",
},
},
wantErr: "affinity and affinityGroupIds cannot be set at the same time for CloudStackMachineConfig test. Please provide either one of them or none",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := tt.obj.Validate()
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestCloudStackMachineConfigSpecEqual(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeTrue(), "deep copy CloudStackMachineConfigSpec showing as non-equal")
}
func TestCloudStackMachineNotEqualTemplateName(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Template.Name = "newName"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Template name comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualTemplateId(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Template.Id = "newId"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Template id comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualComputeOfferingName(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.ComputeOffering.Name = "newComputeOffering"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Compute offering name comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualComputeOfferingId(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.ComputeOffering.Id = "newComputeOffering"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Compute offering id comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualDiskOfferingName(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering = (*cloudStackMachineConfigSpec1.DiskOffering).DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering.Name = "newDiskOffering"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Disk offering name comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualDiskOfferingId(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering = (*cloudStackMachineConfigSpec1.DiskOffering).DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering.Id = "newDiskOffering"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Disk offering id comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualDiskOfferingMountPath(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering = (*cloudStackMachineConfigSpec1.DiskOffering).DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering.MountPath = "newDiskOfferingPath"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Disk offering path comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualDiskOfferingDevice(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering = (*cloudStackMachineConfigSpec1.DiskOffering).DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering.Device = "/dev/sdb"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Disk offering device comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualDiskOfferingLabel(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering = (*cloudStackMachineConfigSpec1.DiskOffering).DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering.Label = "data_disk_new"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Disk offering label comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualDiskOfferingFilesystem(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering = (*cloudStackMachineConfigSpec1.DiskOffering).DeepCopy()
cloudStackMachineConfigSpec2.DiskOffering.Filesystem = "ext3"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Disk offering filesystem comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualAffinity(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Affinity = "anti"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Affinity comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualUsersNil(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Users = nil
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Account comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualUsers(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Users = append(cloudStackMachineConfigSpec2.Users, UserConfiguration{Name: "newUser", SshAuthorizedKeys: []string{"newKey"}})
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Account comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualUserCustomDetailsNil(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.UserCustomDetails = nil
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "UserCustomDetails comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualSymlinksNil(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Symlinks = nil
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Symlinks comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualUserCustomDetails(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.UserCustomDetails["i"] = "j"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "UserCustomDetails comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualSymlinks(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
cloudStackMachineConfigSpec2.Symlinks["i"] = "j"
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Symlinks comparison in CloudStackMachineConfigSpec not detected")
}
func TestCloudStackMachineNotEqualSymlinksDifferentTargetSameKey(t *testing.T) {
g := NewWithT(t)
cloudStackMachineConfigSpec2 := cloudStackMachineConfigSpec1.DeepCopy()
for k, v := range cloudStackMachineConfigSpec2.Symlinks {
cloudStackMachineConfigSpec2.Symlinks[k] = "/different" + v
}
g.Expect(cloudStackMachineConfigSpec1.Equal(cloudStackMachineConfigSpec2)).To(BeFalse(), "Symlinks comparison in CloudStackMachineConfigSpec not detected")
}
| 717 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestCloudStackMachineConfigDiskOfferingEqual(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
diskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering2)).To(BeTrue())
}
func TestCloudStackMachineConfigNilDiskOfferingEqual(t *testing.T) {
var nilDiskOffering *v1alpha1.CloudStackResourceDiskOffering
emptyDiskOffering := &v1alpha1.CloudStackResourceDiskOffering{
MountPath: "",
Device: "",
Filesystem: "",
Label: "",
}
g := NewWithT(t)
g.Expect(nilDiskOffering.Equal(emptyDiskOffering)).To(BeTrue())
}
func TestCloudStackMachineConfigEmptyDiskOfferingEqual(t *testing.T) {
emptyDiskOffering1 := v1alpha1.CloudStackResourceDiskOffering{}
emptyDiskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
MountPath: "",
Device: "",
Filesystem: "",
Label: "",
}
g := NewWithT(t)
g.Expect(emptyDiskOffering1.Equal(emptyDiskOffering2)).To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingEqualSelf(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering1)).To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingNotEqualNil(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(nil)).To(BeFalse())
}
func TestCloudStackMachineConfigDiskOfferingNotEqualName(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
diskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering2",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering2)).To(BeFalse())
}
func TestCloudStackMachineConfigDiskOfferingNotEqualMountPath(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
diskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering2",
},
MountPath: "/data_different",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering2)).To(BeFalse())
}
func TestCloudStackMachineConfigDiskOfferingNotEqualDevice(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
diskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering2",
},
MountPath: "/data",
Device: "/dev/vdb_different",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering2)).To(BeFalse())
}
func TestCloudStackMachineConfigDiskOfferingNotEqualFilesystem(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
diskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering2",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "xfs",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering2)).To(BeFalse())
}
func TestCloudStackMachineConfigDiskOfferingNotEqualLabel(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
diskOffering2 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering2",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk_different",
}
g := NewWithT(t)
g.Expect(diskOffering1.Equal(diskOffering2)).To(BeFalse())
}
func TestCloudStackMachineConfigDiskOfferingValidMountPath(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err == nil).To(BeTrue())
g.Expect(fieldName == "").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingInValidNoIDAndName(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "id or name").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
g.Expect(err.Error() == "empty id/name").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingValidNoIDAndName(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{},
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err == nil).To(BeTrue())
g.Expect(fieldName == "").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingInValidMountPathRoot(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "mountPath").To(BeTrue())
g.Expect(fieldValue == "/").To(BeTrue())
g.Expect(err.Error() == "must be non-empty and start with /").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingInValidMountPathRelative(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "mountPath").To(BeTrue())
g.Expect(fieldValue == "data").To(BeTrue())
g.Expect(err.Error() == "must be non-empty and start with /").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingValid(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err == nil).To(BeTrue())
g.Expect(fieldName == "").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingInValidEmptyDevice(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "device").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
g.Expect(err.Error() == "empty device").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingInValidEmptyFilesystem(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "",
Label: "data_disk",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "filesystem").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
g.Expect(err.Error() == "empty filesystem").To(BeTrue())
}
func TestCloudStackMachineConfigDiskOfferingInValidEmptyLabel(t *testing.T) {
diskOffering1 := &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "",
}
g := NewWithT(t)
err, fieldName, fieldValue := diskOffering1.Validate()
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "label").To(BeTrue())
g.Expect(fieldValue == "").To(BeTrue())
g.Expect(err.Error() == "empty label").To(BeTrue())
}
func TestCloudStackMachineConfigSymlinksValid(t *testing.T) {
symlinks := v1alpha1.SymlinkMaps{
"/var/lib.a": "/data/-var/_log",
}
err, _, _ := symlinks.Validate()
g := NewWithT(t)
g.Expect(err == nil).To(BeTrue())
}
func TestCloudStackMachineConfigSymlinksInValidColon(t *testing.T) {
symlinks := v1alpha1.SymlinkMaps{
"/var/lib": "/data/var/log:d",
}
err, fieldName, fieldValue := symlinks.Validate()
g := NewWithT(t)
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "symlinks").To(BeTrue())
g.Expect(fieldValue == "/data/var/log:d").To(BeTrue())
g.Expect(err.Error() == "has char not in portable file name set").To(BeTrue())
}
func TestCloudStackMachineConfigSymlinksInValidComma(t *testing.T) {
symlinks := v1alpha1.SymlinkMaps{
"/var/lib": "/data/var/log,d",
}
err, fieldName, fieldValue := symlinks.Validate()
g := NewWithT(t)
g.Expect(err != nil).To(BeTrue())
g.Expect(fieldName == "symlinks").To(BeTrue())
g.Expect(fieldValue == "/data/var/log,d").To(BeTrue())
g.Expect(err.Error() == "has char not in portable file name set").To(BeTrue())
}
func TestCloudStackMachineConfigSerialize(t *testing.T) {
tests := map[string]struct {
machineConfig interface{}
expected string
}{
"Serialize machine config": {
machineConfig: v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
DiskOffering: &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "diskOffering1",
},
MountPath: "/data",
Device: "/dev/sda1",
Filesystem: "ext4",
Label: "data_disk",
},
},
},
expected: `metadata:
creationTimestamp: null
spec:
computeOffering: {}
diskOffering:
device: /dev/sda1
filesystem: ext4
label: data_disk
mountPath: /data
name: diskOffering1
template: {}
status: {}
`,
},
"diskOffering should not appear when it's not defined": {
machineConfig: v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{},
},
expected: `metadata:
creationTimestamp: null
spec:
computeOffering: {}
template: {}
status: {}
`,
},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
actual, err := yaml.Marshal(tc.machineConfig)
g := NewWithT(t)
g.Expect(err).To(BeNil())
g.Expect(string(actual)).To(Equal(tc.expected))
})
}
}
func TestCloudStackMachineConfigValidateUsers(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
machineConfig *v1alpha1.CloudStackMachineConfig
wantErr string
}{
{
name: "users valid",
machineConfig: &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{{
Name: "capc",
SshAuthorizedKeys: []string{"ssh-rsa AAAA..."},
}},
},
},
},
{
name: "users not set",
machineConfig: &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{},
},
wantErr: "users is not set for CloudStackMachineConfig , please provide a user",
},
{
name: "user name empty",
machineConfig: &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{{
Name: "",
SshAuthorizedKeys: []string{"ssh-rsa AAAA..."},
}},
},
},
wantErr: "users[0].name is not set or is empty for CloudStackMachineConfig , please provide a username",
},
{
name: "user ssh authorized key empty or not set",
machineConfig: &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{{
Name: "Jeff",
SshAuthorizedKeys: []string{""},
}},
},
},
wantErr: "users[0].SshAuthorizedKeys is not set or is empty for CloudStackMachineConfig , please provide a valid ssh authorized key",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.machineConfig.ValidateUsers()
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestCloudStackMachineConfigSetDefaultUsers(t *testing.T) {
g := NewWithT(t)
machineConfig := &v1alpha1.CloudStackMachineConfig{
Spec: v1alpha1.CloudStackMachineConfigSpec{},
}
machineConfig.SetUserDefaults()
g.Expect(machineConfig.Spec.Users).To(Equal([]v1alpha1.UserConfiguration{
{
Name: v1alpha1.DefaultCloudStackUser,
SshAuthorizedKeys: []string{""},
},
}))
}
| 519 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var cloudstackmachineconfiglog = logf.Log.WithName("cloudstackmachineconfig-resource")
func (r *CloudStackMachineConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-cloudstackmachineconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=cloudstackmachineconfigs,verbs=create;update,versions=v1alpha1,name=validation.cloudstackmachineconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &CloudStackMachineConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *CloudStackMachineConfig) ValidateCreate() error {
cloudstackmachineconfiglog.Info("validate create", "name", r.Name)
if err, fieldName, fieldValue := r.Spec.DiskOffering.Validate(); err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("disk offering %s:%v, preventing CloudStackMachineConfig resource creation: %v", fieldName, fieldValue, err))
}
if err, fieldName, fieldValue := r.Spec.Symlinks.Validate(); err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("symlinks %s:%v, preventing CloudStackMachineConfig resource creation: %v", fieldName, fieldValue, err))
}
// This is only needed for the webhook, which is why it is separate from the Validate method
if err := r.ValidateUsers(); err != nil {
return err
}
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *CloudStackMachineConfig) ValidateUpdate(old runtime.Object) error {
cloudstackmachineconfiglog.Info("validate update", "name", r.Name)
oldCloudStackMachineConfig, ok := old.(*CloudStackMachineConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a CloudStackMachineConfig but got a %T", old))
}
if oldCloudStackMachineConfig.IsReconcilePaused() {
cloudstackmachineconfiglog.Info("Reconciliation is paused")
return nil
}
if oldCloudStackMachineConfig.IsManagement() {
cloudstackmachineconfiglog.Info("Machine config is associated with workload cluster", "name", oldCloudStackMachineConfig.Name)
return nil
}
// This is only needed for the webhook, which is why it is separate from the Validate method
if err := r.ValidateUsers(); err != nil {
return apierrors.NewInvalid(GroupVersion.WithKind(CloudStackMachineConfigKind).GroupKind(),
r.Name,
field.ErrorList{
field.Invalid(field.NewPath("spec", "users"), r.Spec.Users, err.Error()),
})
}
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableFieldsCloudStackMachineConfig(r, oldCloudStackMachineConfig)...)
if err, fieldName, fieldValue := r.Spec.DiskOffering.Validate(); err != nil {
allErrs = append(
allErrs,
field.Invalid(field.NewPath("spec", "diskOffering", fieldName), fieldValue, err.Error()),
)
}
if err, fieldName, fieldValue := r.Spec.Symlinks.Validate(); err != nil {
allErrs = append(
allErrs,
field.Invalid(field.NewPath("spec", "symlinks", fieldName), fieldValue, err.Error()),
)
}
if err := r.ValidateUsers(); err != nil {
allErrs = append(
allErrs,
field.Invalid(field.NewPath("spec", "users"), r.Spec.Users, err.Error()))
}
if err := r.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), r.Spec, err.Error()))
}
if len(allErrs) > 0 {
return apierrors.NewInvalid(GroupVersion.WithKind(CloudStackMachineConfigKind).GroupKind(), r.Name, allErrs)
}
return nil
}
func validateImmutableFieldsCloudStackMachineConfig(new, old *CloudStackMachineConfig) field.ErrorList {
var allErrs field.ErrorList
if old.Spec.Affinity != new.Spec.Affinity {
allErrs = append(
allErrs,
field.Invalid(field.NewPath("spec", "affinity"), new.Spec.Affinity, "field is immutable"),
)
}
affinityGroupIdsMutated := false
if len(old.Spec.AffinityGroupIds) != len(new.Spec.AffinityGroupIds) {
affinityGroupIdsMutated = true
} else {
for index, id := range old.Spec.AffinityGroupIds {
if id != new.Spec.AffinityGroupIds[index] {
affinityGroupIdsMutated = true
break
}
}
}
if affinityGroupIdsMutated {
allErrs = append(
allErrs,
field.Invalid(field.NewPath("spec", "affinityGroupIdsMutated"), new.Spec.AffinityGroupIds, "field is immutable"),
)
}
return allErrs
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *CloudStackMachineConfig) ValidateDelete() error {
cloudstackmachineconfiglog.Info("validate delete", "name", r.Name)
return nil
}
| 142 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestCloudStackMachineConfigValidateCreateValidDiskOffering(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "DiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).To(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidDiskOfferingBadMountPath(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "DiskOffering",
},
MountPath: "/",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidDiskOfferingEmptyDevice(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "DiskOffering",
},
MountPath: "/data",
Device: "",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidDiskOfferingEmptyFilesystem(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "DiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidDiskOfferingEmptyLabel(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "DiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateValidSymlinks(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/lib.a": "/_data/var-redirect/log.d",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).To(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidSymlinksColon(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/lib:a": "/_data/var-redirect/log:d",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidSymlinksComma(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/lib:a": "/_data/var-redirect/log,d",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidSymlinksKeyNotStartWithRoot(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"var/lib": "/data/var/log",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidSymlinksValueNotStartWithRoot(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/lib": "data/var/log",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidSymlinksKeyEndWithRoot(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/lib/": "/data/var/log",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidSymlinksValueEndWithRoot(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/lib": "/data/var/log/",
}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidTemplateEmpty(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Template = v1alpha1.CloudStackResourceIdentifier{}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidComputeOfferingEmpty(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCloudStackMachineConfigValidateCreateInvalidUsers(t *testing.T) {
c := cloudstackMachineConfig()
c.Spec.Users = []v1alpha1.UserConfiguration{{Name: "Jeff"}}
g := NewWithT(t)
g.Expect(c.ValidateCreate()).NotTo(Succeed())
}
func TestCPCloudStackMachineValidateUpdateTemplateMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Template = v1alpha1.CloudStackResourceIdentifier{
Name: "oldTemplate",
}
c := vOld.DeepCopy()
c.Spec.Template = v1alpha1.CloudStackResourceIdentifier{
Name: "newTemplate",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestWorkersCPCloudStackMachineValidateUpdateTemplateMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.Spec.Template = v1alpha1.CloudStackResourceIdentifier{
Name: "oldTemplate",
}
c := vOld.DeepCopy()
c.Spec.Template = v1alpha1.CloudStackResourceIdentifier{
Name: "newTemplate",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestCPCloudStackMachineValidateUpdateComputeOfferingMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "oldComputeOffering",
}
c := vOld.DeepCopy()
c.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "newComputeOffering",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestCPCloudStackMachineValidateUpdateDiskOfferingMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "oldDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
c := vOld.DeepCopy()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "newDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestCPCloudStackMachineValidateUpdateDiskOfferingMutableFailInvalidMountPath(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "oldDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
c := vOld.DeepCopy()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "newDiskOffering",
},
MountPath: "/",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCPCloudStackMachineValidateUpdateDiskOfferingMutableFailEmptyDevice(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "oldDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
c := vOld.DeepCopy()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "newDiskOffering",
},
MountPath: "/data",
Device: "",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCPCloudStackMachineValidateUpdateDiskOfferingMutableFailEmptyFilesystem(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "oldDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
c := vOld.DeepCopy()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "newDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCPCloudStackMachineValidateUpdateDiskOfferingMutableFailEmptyLabel(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "oldDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
c := vOld.DeepCopy()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "newDiskOffering",
},
MountPath: "/data",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCPCloudStackMachineValidateUpdateSymlinksMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/log": "/data/var/log",
}
c := vOld.DeepCopy()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/log": "/data_2/var/log",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestCPCloudStackMachineValidateUpdateSymlinksMutableInvalidComma(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/log": "/data/var/log",
}
c := vOld.DeepCopy()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/log": "/data_2/var/log,d",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestCPCloudStackMachineValidateUpdateSymlinksMutableColon(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/log": "/data/var/log",
}
c := vOld.DeepCopy()
c.Spec.Symlinks = v1alpha1.SymlinkMaps{
"/var/log": "/data_2/var/log:d",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).NotTo(Succeed())
}
func TestWorkersCPCloudStackMachineValidateUpdateComputeOfferingMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "oldComputeOffering",
}
c := vOld.DeepCopy()
c.Spec.ComputeOffering = v1alpha1.CloudStackResourceIdentifier{
Name: "newComputeOffering",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestWorkersCPCloudStackMachineValidateUpdateDiskOfferingMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "oldDiskOffering",
},
MountPath: "/data",
}
c := vOld.DeepCopy()
c.Spec.DiskOffering = &v1alpha1.CloudStackResourceDiskOffering{
CloudStackResourceIdentifier: v1alpha1.CloudStackResourceIdentifier{
Name: "newDiskOffering",
},
MountPath: "/data-new",
Device: "/dev/vdb",
Filesystem: "ext4",
Label: "data_disk",
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestManagementCloudStackMachineValidateUpdateSshAuthorizedKeyMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.SetManagement("test-cluster")
vOld.Spec.Users = []v1alpha1.UserConfiguration{{Name: "Jeff"}}
vOld.Spec.Users[0].SshAuthorizedKeys = []string{"rsa-blahdeblahbalh"}
c := vOld.DeepCopy()
c.Spec.Users[0].SshAuthorizedKeys[0] = "rsa-laDeLala"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestWorkloadCloudStackMachineValidateUpdateSshAuthorizedKeyMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Users = []v1alpha1.UserConfiguration{{Name: "Jeff"}}
vOld.Spec.Users[0].SshAuthorizedKeys = []string{"rsa-blahdeblahbalh"}
c := vOld.DeepCopy()
c.Spec.Users[0].SshAuthorizedKeys[0] = "rsa-laDeLala"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestManagementCloudStackMachineValidateUpdateSshUsernameMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.SetManagement("test-cluster")
vOld.Spec.Users = []v1alpha1.UserConfiguration{{Name: "Jeff"}}
c := vOld.DeepCopy()
c.Spec.Users[0].Name = "Andy"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestWorkloadCloudStackMachineValidateUpdateSshUsernameMutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Users = []v1alpha1.UserConfiguration{{
Name: "Jeff",
SshAuthorizedKeys: []string{"rsa-blahdeblahbalh"},
}}
c := vOld.DeepCopy()
c.Spec.Users[0].Name = "Andy"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).To(Succeed())
}
func TestWorkloadCloudStackMachineValidateUpdateInvalidUsers(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Users = []v1alpha1.UserConfiguration{{
Name: "Jeff",
SshAuthorizedKeys: []string{"rsa-blahdeblahbalh"},
}}
c := vOld.DeepCopy()
c.Spec.Users[0].Name = ""
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).ToNot(Succeed())
}
func TestCloudStackMachineValidateUpdateInvalidType(t *testing.T) {
vOld := &v1alpha1.Cluster{}
c := &v1alpha1.CloudStackMachineConfig{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(vOld)).NotTo(Succeed())
}
func cloudstackMachineConfig() v1alpha1.CloudStackMachineConfig {
return v1alpha1.CloudStackMachineConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 2)},
Spec: v1alpha1.CloudStackMachineConfigSpec{
Template: v1alpha1.CloudStackResourceIdentifier{
Name: "template1",
},
ComputeOffering: v1alpha1.CloudStackResourceIdentifier{
Name: "offering1",
},
Users: []v1alpha1.UserConfiguration{
{
Name: "capc",
SshAuthorizedKeys: []string{"ssh-rsa AAAA..."},
},
},
},
Status: v1alpha1.CloudStackMachineConfigStatus{},
}
}
func TestCloudStackMachineValidateUpdateAffinityImmutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.Affinity = "pro"
c := vOld.DeepCopy()
c.Spec.Affinity = "anti"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).ToNot(Succeed())
}
func TestCloudStackMachineValidateUpdateAffinityGroupIdsImmutable(t *testing.T) {
vOld := cloudstackMachineConfig()
vOld.SetControlPlane()
vOld.Spec.AffinityGroupIds = []string{"affinity-group-1"}
c := vOld.DeepCopy()
c.Spec.AffinityGroupIds = []string{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).ToNot(Succeed())
c.Spec.AffinityGroupIds = []string{"affinity-group-2"}
g = NewWithT(t)
g.Expect(c.ValidateUpdate(&vOld)).ToNot(Succeed())
}
| 552 |
eks-anywhere | aws | Go | package v1alpha1
import (
"context"
"errors"
"fmt"
"net"
"net/url"
"os"
"regexp"
"strconv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/pkg/semver"
)
const (
ClusterKind = "Cluster"
YamlSeparator = "\n---\n"
RegistryMirrorCAKey = "EKSA_REGISTRY_MIRROR_CA"
podSubnetNodeMaskMaxDiff = 16
)
var re = regexp.MustCompile(constants.DefaultCuratedPackagesRegistryRegex)
// +kubebuilder:object:generate=false
type ClusterGenerateOpt func(config *ClusterGenerate)
// Used for generating yaml for generate clusterconfig command.
func NewClusterGenerate(clusterName string, opts ...ClusterGenerateOpt) *ClusterGenerate {
clusterConfig := &ClusterGenerate{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: ClusterSpec{
KubernetesVersion: GetClusterDefaultKubernetesVersion(),
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
},
}
clusterConfig.SetSelfManaged()
for _, opt := range opts {
opt(clusterConfig)
}
return clusterConfig
}
func ControlPlaneConfigCount(count int) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.ControlPlaneConfiguration.Count = count
}
}
func ExternalETCDConfigCount(count int) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.ExternalEtcdConfiguration = &ExternalEtcdConfiguration{
Count: count,
}
}
}
func WorkerNodeConfigCount(count int) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.WorkerNodeGroupConfigurations = []WorkerNodeGroupConfiguration{{Count: &count}}
}
}
func WorkerNodeConfigName(name string) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.WorkerNodeGroupConfigurations[0].Name = name
}
}
func WithClusterEndpoint() ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.ControlPlaneConfiguration.Endpoint = &Endpoint{Host: ""}
}
}
// WithCPUpgradeRolloutStrategy allows add UpgradeRolloutStrategy option to cluster config under ControlPlaneConfiguration.
func WithCPUpgradeRolloutStrategy(maxSurge int, maxUnavailable int) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy = &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: ControlPlaneRollingUpdateParams{MaxSurge: maxSurge}}
}
}
func WithDatacenterRef(ref ProviderRefAccessor) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.DatacenterRef = Ref{
Kind: ref.Kind(),
Name: ref.Name(),
}
}
}
func WithSharedMachineGroupRef(ref ProviderRefAccessor) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.ControlPlaneConfiguration.MachineGroupRef = &Ref{
Kind: ref.Kind(),
Name: ref.Name(),
}
c.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef = &Ref{
Kind: ref.Kind(),
Name: ref.Name(),
}
}
}
func WithCPMachineGroupRef(ref ProviderRefAccessor) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.ControlPlaneConfiguration.MachineGroupRef = &Ref{
Kind: ref.Kind(),
Name: ref.Name(),
}
}
}
func WithWorkerMachineGroupRef(ref ProviderRefAccessor) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef = &Ref{
Kind: ref.Kind(),
Name: ref.Name(),
}
}
}
// WithWorkerMachineUpgradeRolloutStrategy allows add UpgradeRolloutStrategy option to cluster config under WorkerNodeGroupConfiguration.
func WithWorkerMachineUpgradeRolloutStrategy(maxSurge int, maxUnavailable int) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
c.Spec.WorkerNodeGroupConfigurations[0].UpgradeRolloutStrategy = &WorkerNodesUpgradeRolloutStrategy{
Type: "RollingUpdate",
RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: maxSurge, MaxUnavailable: maxUnavailable},
}
}
}
func WithEtcdMachineGroupRef(ref ProviderRefAccessor) ClusterGenerateOpt {
return func(c *ClusterGenerate) {
if c.Spec.ExternalEtcdConfiguration != nil {
c.Spec.ExternalEtcdConfiguration.MachineGroupRef = &Ref{
Kind: ref.Kind(),
Name: ref.Name(),
}
}
}
}
func NewCluster(clusterName string) *Cluster {
c := &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
},
Status: ClusterStatus{},
}
c.SetSelfManaged()
return c
}
var clusterConfigValidations = []func(*Cluster) error{
validateClusterConfigName,
validateControlPlaneEndpoint,
validateExternalEtcdSupport,
validateMachineGroupRefs,
validateControlPlaneReplicas,
validateWorkerNodeGroups,
validateNetworking,
validateGitOps,
validateEtcdReplicas,
validateIdentityProviderRefs,
validateProxyConfig,
validateMirrorConfig,
validatePodIAMConfig,
validateCPUpgradeRolloutStrategy,
validateControlPlaneLabels,
validatePackageControllerConfiguration,
validateCloudStackK8sVersion,
}
// GetClusterConfig parses a Cluster object from a multiobject yaml file in disk
// and sets defaults if necessary.
func GetClusterConfig(fileName string) (*Cluster, error) {
clusterConfig := &Cluster{}
err := ParseClusterConfig(fileName, clusterConfig)
if err != nil {
return clusterConfig, err
}
if err := setClusterDefaults(clusterConfig); err != nil {
return clusterConfig, err
}
return clusterConfig, nil
}
// GetClusterConfigFromContent parses a Cluster object from a multiobject yaml content.
func GetClusterConfigFromContent(content []byte) (*Cluster, error) {
clusterConfig := &Cluster{}
err := ParseClusterConfigFromContent(content, clusterConfig)
if err != nil {
return clusterConfig, err
}
return clusterConfig, nil
}
// GetClusterConfig parses a Cluster object from a multiobject yaml file in disk
// sets defaults if necessary and validates the Cluster.
func GetAndValidateClusterConfig(fileName string) (*Cluster, error) {
clusterConfig, err := GetClusterConfig(fileName)
if err != nil {
return nil, err
}
err = ValidateClusterConfigContent(clusterConfig)
if err != nil {
return nil, err
}
return clusterConfig, nil
}
// GetClusterDefaultKubernetesVersion returns the default kubernetes version for a Cluster.
func GetClusterDefaultKubernetesVersion() KubernetesVersion {
return Kube127
}
// ValidateClusterConfigContent validates a Cluster object without modifying it
// Some of the validations are a bit heavy and need a network connection.
func ValidateClusterConfigContent(clusterConfig *Cluster) error {
for _, v := range clusterConfigValidations {
if err := v(clusterConfig); err != nil {
return err
}
}
return nil
}
// ParseClusterConfig unmarshalls an API object implementing the KindAccessor interface
// from a multiobject yaml file in disk. It doesn't set defaults nor validates the object.
func ParseClusterConfig(fileName string, clusterConfig KindAccessor) error {
content, err := os.ReadFile(fileName)
if err != nil {
return fmt.Errorf("unable to read file due to: %v", err)
}
if err = ParseClusterConfigFromContent(content, clusterConfig); err != nil {
return fmt.Errorf("unable to parse %s file: %v", fileName, err)
}
return nil
}
type kindObject struct {
Kind string `json:"kind,omitempty"`
}
// ParseClusterConfigFromContent unmarshalls an API object implementing the KindAccessor interface
// from a multiobject yaml content. It doesn't set defaults nor validates the object.
func ParseClusterConfigFromContent(content []byte, clusterConfig KindAccessor) error {
for _, c := range strings.Split(string(content), YamlSeparator) {
k := &kindObject{}
if err := yaml.Unmarshal([]byte(c), k); err != nil {
return err
}
if k.Kind == clusterConfig.ExpectedKind() {
return yaml.UnmarshalStrict([]byte(c), clusterConfig)
}
}
return fmt.Errorf("yamlop content is invalid or does not contain kind %s", clusterConfig.ExpectedKind())
}
func (c *Cluster) PauseReconcile() {
if c.Annotations == nil {
c.Annotations = map[string]string{}
}
c.Annotations[pausedAnnotation] = "true"
}
func (c *Cluster) ClearPauseAnnotation() {
if c.Annotations != nil {
delete(c.Annotations, pausedAnnotation)
}
}
// RegistryAuth returns whether registry requires authentication or not.
func (c *Cluster) RegistryAuth() bool {
if c.Spec.RegistryMirrorConfiguration == nil {
return false
}
return c.Spec.RegistryMirrorConfiguration.Authenticate
}
func (c *Cluster) ProxyConfiguration() map[string]string {
if c.Spec.ProxyConfiguration == nil {
return nil
}
noProxyList := append(c.Spec.ProxyConfiguration.NoProxy, c.Spec.ClusterNetwork.Pods.CidrBlocks...)
noProxyList = append(noProxyList, c.Spec.ClusterNetwork.Services.CidrBlocks...)
if c.Spec.ControlPlaneConfiguration.Endpoint != nil && c.Spec.ControlPlaneConfiguration.Endpoint.Host != "" {
noProxyList = append(
noProxyList,
c.Spec.ControlPlaneConfiguration.Endpoint.Host,
)
}
return map[string]string{
"HTTP_PROXY": c.Spec.ProxyConfiguration.HttpProxy,
"HTTPS_PROXY": c.Spec.ProxyConfiguration.HttpsProxy,
"NO_PROXY": strings.Join(noProxyList[:], ","),
}
}
func (c *Cluster) IsReconcilePaused() bool {
if s, ok := c.Annotations[pausedAnnotation]; ok {
return s == "true"
}
return false
}
func ValidateClusterName(clusterName string) error {
// this regex will not work for AWS provider as CFN has restrictions with UPPERCASE chars;
// if you are using AWS provider please use only lowercase chars
allowedClusterNameRegex := regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9-]+$`)
if !allowedClusterNameRegex.MatchString(clusterName) {
return fmt.Errorf("%v is not a valid cluster name, cluster names must start with lowercase/uppercase letters and can include numbers and dashes. For instance 'testCluster-123' is a valid name but '123testCluster' is not. ", clusterName)
}
return nil
}
func ValidateClusterNameLength(clusterName string) error {
// vSphere has the maximum length for clusters to be 80 chars
if len(clusterName) > 80 {
return fmt.Errorf("number of characters in %v should be less than 81", clusterName)
}
return nil
}
func validateClusterConfigName(clusterConfig *Cluster) error {
err := ValidateClusterName(clusterConfig.ObjectMeta.Name)
if err != nil {
return fmt.Errorf("failed to validate cluster config name: %v", err)
}
err = ValidateClusterNameLength(clusterConfig.ObjectMeta.Name)
if err != nil {
return fmt.Errorf("failed to validate cluster config name: %v", err)
}
return nil
}
func validateExternalEtcdSupport(cluster *Cluster) error {
if cluster.Spec.DatacenterRef.Kind == TinkerbellDatacenterKind {
if cluster.Spec.ExternalEtcdConfiguration != nil {
return errors.New("tinkerbell external etcd configuration is unsupported")
}
}
return nil
}
func validateMachineGroupRefs(cluster *Cluster) error {
if cluster.Spec.DatacenterRef.Kind != DockerDatacenterKind {
if cluster.Spec.ControlPlaneConfiguration.MachineGroupRef == nil {
return errors.New("must specify machineGroupRef control plane machines")
}
for _, workerNodeGroupConfiguration := range cluster.Spec.WorkerNodeGroupConfigurations {
if workerNodeGroupConfiguration.MachineGroupRef == nil {
return errors.New("must specify machineGroupRef for worker nodes")
}
}
if cluster.Spec.ExternalEtcdConfiguration != nil && cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef == nil {
return errors.New("must specify machineGroupRef for etcd machines")
}
}
return nil
}
func validateControlPlaneReplicas(clusterConfig *Cluster) error {
if clusterConfig.Spec.ControlPlaneConfiguration.Count <= 0 {
return errors.New("control plane node count must be positive")
}
if clusterConfig.Spec.ExternalEtcdConfiguration != nil {
// For unstacked/external etcd, controlplane replicas can be any number including even numbers.
return nil
}
if clusterConfig.Spec.ControlPlaneConfiguration.Count%2 == 0 {
return errors.New("control plane node count cannot be an even number")
}
if clusterConfig.Spec.ControlPlaneConfiguration.Count != 3 && clusterConfig.Spec.ControlPlaneConfiguration.Count != 5 {
if clusterConfig.Spec.DatacenterRef.Kind != DockerDatacenterKind {
logger.Info("Warning: The recommended number of control plane nodes is 3 or 5")
}
}
return nil
}
func validateControlPlaneLabels(clusterConfig *Cluster) error {
if err := validateNodeLabels(clusterConfig.Spec.ControlPlaneConfiguration.Labels, field.NewPath("spec", "controlPlaneConfiguration", "labels")); err != nil {
return fmt.Errorf("labels for control plane not valid: %v", err)
}
return nil
}
func validateControlPlaneEndpoint(clusterConfig *Cluster) error {
if (clusterConfig.Spec.ControlPlaneConfiguration.Endpoint == nil || len(clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host) <= 0) && clusterConfig.Spec.DatacenterRef.Kind != DockerDatacenterKind {
return errors.New("cluster controlPlaneConfiguration.Endpoint.Host is not set or is empty")
}
return nil
}
func validateWorkerNodeGroups(clusterConfig *Cluster) error {
workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations
if len(workerNodeGroupConfigs) <= 0 {
if clusterConfig.Spec.DatacenterRef.Kind == TinkerbellDatacenterKind {
logger.Info("Warning: No configurations provided for worker node groups, pods will be scheduled on control-plane nodes")
} else {
return fmt.Errorf("WorkerNodeGroupConfigs cannot be empty for %s", clusterConfig.Spec.DatacenterRef.Kind)
}
}
workerNodeGroupNames := make(map[string]bool, len(workerNodeGroupConfigs))
noExecuteNoScheduleTaintedNodeGroups := make(map[string]struct{})
for i, workerNodeGroupConfig := range workerNodeGroupConfigs {
if workerNodeGroupConfig.Name == "" {
return errors.New("must specify name for worker nodes")
}
if workerNodeGroupConfig.Count == nil {
// This block should never fire. If it does, it means we have a bug in how we set our defaults.
// When Count == nil it should be set to 1 by SetDefaults method prior to reaching validation.
return errors.New("worker node count must be >= 0")
}
if err := validateAutoscalingConfig(&workerNodeGroupConfig); err != nil {
return fmt.Errorf("validating autoscaling configuration: %v", err)
}
if err := validateMDUpgradeRolloutStrategy(&workerNodeGroupConfig); err != nil {
return fmt.Errorf("validating upgrade rollout strategy configuration: %v", err)
}
if workerNodeGroupNames[workerNodeGroupConfig.Name] {
return errors.New("worker node group names must be unique")
}
if len(workerNodeGroupConfig.Taints) != 0 {
for _, taint := range workerNodeGroupConfig.Taints {
if taint.Effect == "NoExecute" || taint.Effect == "NoSchedule" {
noExecuteNoScheduleTaintedNodeGroups[workerNodeGroupConfig.Name] = struct{}{}
}
}
}
workerNodeGroupField := fmt.Sprintf("workerNodeGroupConfigurations[%d]", i)
if err := validateNodeLabels(workerNodeGroupConfig.Labels, field.NewPath("spec", workerNodeGroupField, "labels")); err != nil {
return fmt.Errorf("labels for worker node group %v not valid: %v", workerNodeGroupConfig.Name, err)
}
workerNodeGroupNames[workerNodeGroupConfig.Name] = true
}
if len(workerNodeGroupConfigs) > 0 && len(noExecuteNoScheduleTaintedNodeGroups) == len(workerNodeGroupConfigs) {
return errors.New("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")
}
if len(workerNodeGroupConfigs) == 0 && len(clusterConfig.Spec.ControlPlaneConfiguration.Taints) != 0 {
return errors.New("cannot taint control plane when there is no worker node")
}
if len(workerNodeGroupConfigs) == 0 && clusterConfig.Spec.KubernetesVersion <= Kube121 {
return errors.New("Empty workerNodeGroupConfigs is not supported for kube version <= 1.21")
}
return nil
}
func validateAutoscalingConfig(w *WorkerNodeGroupConfiguration) error {
if w == nil {
return nil
}
if w.AutoScalingConfiguration == nil && *w.Count < 0 {
return errors.New("worker node count must be zero or greater if autoscaling is not enabled")
}
if w.AutoScalingConfiguration == nil {
return nil
}
if w.AutoScalingConfiguration.MinCount < 0 {
return errors.New("min count must be non negative")
}
if w.AutoScalingConfiguration.MinCount > w.AutoScalingConfiguration.MaxCount {
return errors.New("min count must be no greater than max count")
}
if w.AutoScalingConfiguration.MinCount > *w.Count {
return errors.New("min count must be less than or equal to count")
}
if w.AutoScalingConfiguration.MaxCount < *w.Count {
return errors.New("max count must be greater than or equal to count")
}
return nil
}
func validateNodeLabels(labels map[string]string, fldPath *field.Path) error {
errList := validation.ValidateLabels(labels, fldPath)
if len(errList) != 0 {
return fmt.Errorf("found following errors with labels: %v", errList.ToAggregate().Error())
}
return nil
}
func validateEtcdReplicas(clusterConfig *Cluster) error {
if clusterConfig.Spec.ExternalEtcdConfiguration == nil {
return nil
}
if clusterConfig.Spec.ExternalEtcdConfiguration.Count == 0 {
return errors.New("no value set for etcd replicas")
}
if clusterConfig.Spec.ExternalEtcdConfiguration.Count < 0 {
return errors.New("etcd replicas cannot be a negative number")
}
if clusterConfig.Spec.ExternalEtcdConfiguration.Count%2 == 0 {
return errors.New("external etcd count cannot be an even number")
}
if clusterConfig.Spec.ExternalEtcdConfiguration.Count != 3 && clusterConfig.Spec.ExternalEtcdConfiguration.Count != 5 {
if clusterConfig.Spec.DatacenterRef.Kind != DockerDatacenterKind {
// only log warning about recommended etcd cluster size for providers other than docker
logger.Info("Warning: The recommended size of an external etcd cluster is 3 or 5")
}
}
return nil
}
func validateNetworking(clusterConfig *Cluster) error {
clusterNetwork := clusterConfig.Spec.ClusterNetwork
if len(clusterNetwork.Pods.CidrBlocks) <= 0 {
return errors.New("pods CIDR block not specified or empty")
}
if len(clusterNetwork.Services.CidrBlocks) <= 0 {
return errors.New("services CIDR block not specified or empty")
}
if len(clusterNetwork.Pods.CidrBlocks) > 1 {
return fmt.Errorf("multiple CIDR blocks for Pods are not yet supported")
}
if len(clusterNetwork.Services.CidrBlocks) > 1 {
return fmt.Errorf("multiple CIDR blocks for Services are not yet supported")
}
_, podCIDRIPNet, err := net.ParseCIDR(clusterNetwork.Pods.CidrBlocks[0])
if err != nil {
return fmt.Errorf("invalid CIDR block format for Pods: %s. Please specify a valid CIDR block for pod subnet", clusterNetwork.Pods)
}
_, serviceCIDRIPNet, err := net.ParseCIDR(clusterNetwork.Services.CidrBlocks[0])
if err != nil {
return fmt.Errorf("invalid CIDR block for Services: %s. Please specify a valid CIDR block for service subnet", clusterNetwork.Services)
}
if clusterConfig.Spec.DatacenterRef.Kind == SnowDatacenterKind {
controlPlaneEndpoint := net.ParseIP(clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host)
if controlPlaneEndpoint == nil {
return fmt.Errorf("control plane endpoint %s is invalid", clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host)
}
if podCIDRIPNet.Contains(controlPlaneEndpoint) {
return fmt.Errorf("control plane endpoint %s conflicts with pods CIDR block %s", clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host, clusterNetwork.Pods.CidrBlocks[0])
}
if serviceCIDRIPNet.Contains(controlPlaneEndpoint) {
return fmt.Errorf("control plane endpoint %s conflicts with services CIDR block %s", clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host, clusterNetwork.Services.CidrBlocks[0])
}
}
podMaskSize, _ := podCIDRIPNet.Mask.Size()
nodeCidrMaskSize := constants.DefaultNodeCidrMaskSize
if clusterNetwork.Nodes != nil && clusterNetwork.Nodes.CIDRMaskSize != nil {
nodeCidrMaskSize = *clusterNetwork.Nodes.CIDRMaskSize
}
// the pod subnet mask needs to allow one or multiple node-masks
// i.e. if it has a /24 the node mask must be between 24 and 32 for ipv4
// the below validations are run by kubeadm and we are bubbling those up here for better customer experience
if podMaskSize >= nodeCidrMaskSize {
return fmt.Errorf("the size of pod subnet with mask %d is smaller than or equal to the size of node subnet with mask %d", podMaskSize, nodeCidrMaskSize)
} else if (nodeCidrMaskSize - podMaskSize) > podSubnetNodeMaskMaxDiff {
// PodSubnetNodeMaskMaxDiff is limited to 16 due to an issue with uncompressed IP bitmap in core
// The node subnet mask size must be no more than the pod subnet mask size + 16
return fmt.Errorf("pod subnet mask (%d) and node-mask (%d) difference is greater than %d", podMaskSize, nodeCidrMaskSize, podSubnetNodeMaskMaxDiff)
}
return validateCNIPlugin(clusterNetwork)
}
func validateCNIPlugin(network ClusterNetwork) error {
if network.CNI != "" {
if network.CNIConfig != nil {
return fmt.Errorf("invalid format for cni plugin: both old and new formats used, use only the CNIConfig field")
}
logger.Info("Warning: CNI field is deprecated. Provide CNI information through CNIConfig")
if _, ok := validCNIs[network.CNI]; !ok {
return fmt.Errorf("cni %s not supported", network.CNI)
}
return nil
}
return validateCNIConfig(network.CNIConfig)
}
func validateCNIConfig(cniConfig *CNIConfig) error {
if cniConfig == nil {
return fmt.Errorf("cni not specified")
}
var cniPluginSpecified int
var allErrs []error
if cniConfig.Cilium != nil {
cniPluginSpecified++
if err := validateCiliumConfig(cniConfig.Cilium); err != nil {
allErrs = append(allErrs, err)
}
}
if cniConfig.Kindnetd != nil {
cniPluginSpecified++
}
if cniPluginSpecified == 0 {
allErrs = append(allErrs, fmt.Errorf("no cni plugin specified"))
} else if cniPluginSpecified > 1 {
allErrs = append(allErrs, fmt.Errorf("cannot specify more than one cni plugins"))
}
if len(allErrs) > 0 {
aggregate := utilerrors.NewAggregate(allErrs)
return fmt.Errorf("validating cniConfig: %v", aggregate)
}
return nil
}
func validateCiliumConfig(cilium *CiliumConfig) error {
if cilium == nil {
return nil
}
if !cilium.IsManaged() {
if cilium.PolicyEnforcementMode != "" {
return errors.New("when using skipUpgrades for cilium all other fields must be empty")
}
}
if cilium.PolicyEnforcementMode == "" {
return nil
}
if !validCiliumPolicyEnforcementModes[cilium.PolicyEnforcementMode] {
return fmt.Errorf("cilium policyEnforcementMode \"%s\" not supported", cilium.PolicyEnforcementMode)
}
return nil
}
func validateProxyConfig(clusterConfig *Cluster) error {
if clusterConfig.Spec.ProxyConfiguration == nil {
return nil
}
if clusterConfig.Spec.ProxyConfiguration.HttpProxy == "" {
return errors.New("no value set for httpProxy")
}
if clusterConfig.Spec.ProxyConfiguration.HttpsProxy == "" {
return errors.New("no value set for httpsProxy")
}
if err := validateProxyData(clusterConfig.Spec.ProxyConfiguration.HttpProxy); err != nil {
return err
}
if err := validateProxyData(clusterConfig.Spec.ProxyConfiguration.HttpsProxy); err != nil {
return err
}
return nil
}
func validateProxyData(proxy string) error {
var proxyHost string
if strings.HasPrefix(proxy, "http") {
u, err := url.ParseRequestURI(proxy)
if err != nil {
return fmt.Errorf("proxy %s is invalid, please provide a valid URI", proxy)
}
proxyHost = u.Host
} else {
proxyHost = proxy
}
host, port, err := net.SplitHostPort(proxyHost)
if err != nil {
return fmt.Errorf("proxy endpoint %s is invalid (%s), please provide a valid proxy address", proxy, err)
}
_, err = net.DefaultResolver.LookupIPAddr(context.Background(), host)
if err != nil && net.ParseIP(host) == nil {
return fmt.Errorf("proxy endpoint %s is invalid, please provide a valid proxy domain name or ip: %v", host, err)
}
if p, err := strconv.Atoi(port); err != nil || p < 1 || p > 65535 {
return fmt.Errorf("proxy port %s is invalid, please provide a valid proxy port", port)
}
return nil
}
func validateMirrorConfig(clusterConfig *Cluster) error {
if clusterConfig.Spec.RegistryMirrorConfiguration == nil {
return nil
}
if clusterConfig.Spec.RegistryMirrorConfiguration.Endpoint == "" {
return errors.New("no value set for RegistryMirrorConfiguration.Endpoint")
}
if !networkutils.IsPortValid(clusterConfig.Spec.RegistryMirrorConfiguration.Port) {
return fmt.Errorf("registry mirror port %s is invalid, please provide a valid port", clusterConfig.Spec.RegistryMirrorConfiguration.Port)
}
mirrorCount := 0
ociNamespaces := clusterConfig.Spec.RegistryMirrorConfiguration.OCINamespaces
for _, ociNamespace := range ociNamespaces {
if ociNamespace.Registry == "" {
return errors.New("registry can't be set to empty in OCINamespaces")
}
if re.MatchString(ociNamespace.Registry) {
mirrorCount++
// More than one mirror for curated package would introduce ambiguity in the package controller
if mirrorCount > 1 {
return errors.New("only one registry mirror for curated packages is suppported")
}
}
}
if mirrorCount == 1 {
// BottleRocket accepts only one registry mirror and that is hardcoded for public.ecr.aws at this moment.
// Such a validation will be removed once CAPI is patched to support more than one endpoints for BottleRocket.
if ociNamespaces[0].Registry != constants.DefaultCoreEKSARegistry {
return errors.New("registry must be public.ecr.aws when only one mapping is specified")
}
}
return nil
}
func validateIdentityProviderRefs(clusterConfig *Cluster) error {
refs := clusterConfig.Spec.IdentityProviderRefs
if len(refs) == 0 {
return nil
}
for _, ref := range refs {
if ref.Kind != OIDCConfigKind && ref.Kind != AWSIamConfigKind {
return fmt.Errorf("kind: %s for identityProviderRef is not supported", ref.Kind)
}
if ref.Name == "" {
return errors.New("specify a valid name for identityProviderRef")
}
}
return nil
}
func validateGitOps(clusterConfig *Cluster) error {
gitOpsRef := clusterConfig.Spec.GitOpsRef
if gitOpsRef == nil {
return nil
}
gitOpsRefKind := gitOpsRef.Kind
if gitOpsRefKind != GitOpsConfigKind && gitOpsRefKind != FluxConfigKind {
return errors.New("only GitOpsConfig or FluxConfig Kind are supported at this time")
}
if gitOpsRef.Name == "" {
return errors.New("GitOpsRef name can't be empty; specify a valid GitOpsConfig name")
}
return nil
}
func validatePodIAMConfig(clusterConfig *Cluster) error {
if clusterConfig.Spec.PodIAMConfig == nil {
return nil
}
if clusterConfig.Spec.PodIAMConfig.ServiceAccountIssuer == "" {
return errors.New("ServiceAccount Issuer can't be empty while configuring IAM roles for pods")
}
return nil
}
func validateCPUpgradeRolloutStrategy(clusterConfig *Cluster) error {
if clusterConfig.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy == nil {
return nil
}
if clusterConfig.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.Type != "RollingUpdate" {
return fmt.Errorf("ControlPlaneConfiguration: only 'RollingUpdate' supported for upgrade rollout strategy type")
}
if clusterConfig.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge < 0 {
return fmt.Errorf("ControlPlaneConfiguration: maxSurge for control plane cannot be a negative value")
}
if clusterConfig.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge > 1 {
return fmt.Errorf("ControlPlaneConfiguration: maxSurge for control plane must be 0 or 1")
}
return nil
}
func validateMDUpgradeRolloutStrategy(w *WorkerNodeGroupConfiguration) error {
if w.UpgradeRolloutStrategy == nil {
return nil
}
if w.UpgradeRolloutStrategy.Type != "RollingUpdate" {
return fmt.Errorf("WorkerNodeGroupConfiguration: only 'RollingUpdate' supported for upgrade rollout strategy type")
}
if w.UpgradeRolloutStrategy.RollingUpdate.MaxSurge < 0 || w.UpgradeRolloutStrategy.RollingUpdate.MaxUnavailable < 0 {
return fmt.Errorf("WorkerNodeGroupConfiguration: maxSurge and maxUnavailable values cannot be negative")
}
if w.UpgradeRolloutStrategy.RollingUpdate.MaxSurge == 0 && w.UpgradeRolloutStrategy.RollingUpdate.MaxUnavailable == 0 {
return fmt.Errorf("WorkerNodeGroupConfiguration: maxSurge and maxUnavailable not specified or are 0. maxSurge and maxUnavailable cannot both be 0")
}
return nil
}
func validatePackageControllerConfiguration(clusterConfig *Cluster) error {
if clusterConfig.IsManaged() {
if clusterConfig.Spec.Packages != nil {
if clusterConfig.Spec.Packages.Controller != nil {
return fmt.Errorf("packages: controller should not be specified for a workload cluster")
}
if clusterConfig.Spec.Packages.CronJob != nil {
return fmt.Errorf("packages: cronjob should not be specified for a workload cluster")
}
}
}
return nil
}
func validateCloudStackK8sVersion(cluster *Cluster) error {
if cluster.Spec.DatacenterRef.Kind == CloudStackDatacenterKind {
return ValidateCloudStackK8sVersion(cluster.Spec.KubernetesVersion)
}
return nil
}
// ValidateCloudStackK8sVersion validates version is supported by CAPC.
func ValidateCloudStackK8sVersion(version KubernetesVersion) error {
kubeVersionSemver, err := semver.New(string(version) + ".0")
if err != nil {
return fmt.Errorf("converting kubeVersion %v to semver %v", version, err)
}
kube125Semver, _ := semver.New(string(Kube125) + ".0")
if kubeVersionSemver.Compare(kube125Semver) != -1 {
return errors.New("cloudstack provider does not support K8s version > 1.24")
}
return nil
}
| 883 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"os"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
var clusterDefaults = []func(*Cluster) error{
setRegistryMirrorConfigDefaults,
setWorkerNodeGroupDefaults,
setCNIConfigDefault,
}
func setClusterDefaults(cluster *Cluster) error {
for _, d := range clusterDefaults {
if err := d(cluster); err != nil {
return err
}
}
return nil
}
func setRegistryMirrorConfigDefaults(clusterConfig *Cluster) error {
if clusterConfig.Spec.RegistryMirrorConfiguration == nil {
return nil
}
if clusterConfig.Spec.RegistryMirrorConfiguration.Port == "" {
logger.V(1).Info("RegistryMirrorConfiguration.Port is not specified, default port will be used", "Default Port", constants.DefaultHttpsPort)
clusterConfig.Spec.RegistryMirrorConfiguration.Port = constants.DefaultHttpsPort
}
if clusterConfig.Spec.RegistryMirrorConfiguration.CACertContent == "" {
if caCert, set := os.LookupEnv(RegistryMirrorCAKey); set && len(caCert) > 0 {
content, err := os.ReadFile(caCert)
if err != nil {
return fmt.Errorf("reading the cert file %s: %v", caCert, err)
}
logger.V(4).Info(fmt.Sprintf("%s is set, using %s as ca cert for registry", RegistryMirrorCAKey, caCert))
clusterConfig.Spec.RegistryMirrorConfiguration.CACertContent = string(content)
}
}
return nil
}
func setWorkerNodeGroupDefaults(cluster *Cluster) error {
if len(cluster.Spec.WorkerNodeGroupConfigurations) >= 1 && cluster.Spec.WorkerNodeGroupConfigurations[0].Name == "" {
logger.V(1).Info("First worker node group name not specified. Defaulting name to md-0.")
cluster.Spec.WorkerNodeGroupConfigurations[0].Name = "md-0"
}
for i := range cluster.Spec.WorkerNodeGroupConfigurations {
w := &cluster.Spec.WorkerNodeGroupConfigurations[i]
if w.Count == nil && w.AutoScalingConfiguration != nil {
w.Count = &w.AutoScalingConfiguration.MinCount
} else if w.Count == nil {
w.Count = ptr.Int(1)
}
}
return nil
}
func setCNIConfigDefault(cluster *Cluster) error {
if cluster.Spec.ClusterNetwork.CNIConfig != nil {
return nil
}
cluster.Spec.ClusterNetwork.CNIConfig = &CNIConfig{}
switch cluster.Spec.ClusterNetwork.CNI {
case Cilium, CiliumEnterprise:
cluster.Spec.ClusterNetwork.CNIConfig.Cilium = &CiliumConfig{}
case Kindnetd:
cluster.Spec.ClusterNetwork.CNIConfig.Kindnetd = &KindnetdConfig{}
}
cluster.Spec.ClusterNetwork.CNI = ""
return nil
}
| 82 |
eks-anywhere | aws | Go | package v1alpha1
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestSetClusterDefaults(t *testing.T) {
tests := []struct {
name string
in, wantCluster *Cluster
wantErr string
}{
{
name: "worker node group - no name specified",
in: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{},
},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{},
},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: "",
},
{
name: "cni plugin - old format in input, set new format",
in: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ClusterNetwork: ClusterNetwork{
CNI: Cilium,
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{},
},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: "",
},
{
name: "worker node group - no count specified with autoscaler",
in: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "worker-0",
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: 3,
MaxCount: 5,
},
}},
},
},
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: nil,
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "worker-0",
Count: ptr.Int(3),
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: 3,
MaxCount: 5,
},
}},
},
},
wantErr: "",
},
{
name: "worker node group - no count specified",
in: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "worker-0",
}},
},
},
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: nil,
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "worker-0",
Count: ptr.Int(1),
}},
},
},
wantErr: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
gotErr := setClusterDefaults(tt.in)
if tt.wantErr == "" {
g.Expect(gotErr).To(BeNil())
} else {
g.Expect(gotErr).To(MatchError(ContainSubstring(tt.wantErr)))
}
g.Expect(tt.in).To(Equal(tt.wantCluster))
})
}
}
| 262 |
eks-anywhere | aws | Go | package v1alpha1
import (
"errors"
"fmt"
"reflect"
"strings"
"testing"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestValidateClusterName(t *testing.T) {
tests := []struct {
clusterName, name string
wantErr error
}{
{
name: "FailureSpecialChars",
clusterName: "test-cluster@123_",
wantErr: errors.New("test-cluster@123_ is not a valid cluster name, cluster names must start with lowercase/uppercase letters and can include numbers and dashes. For instance 'testCluster-123' is a valid name but '123testCluster' is not. "),
},
{
name: "FailureDotChars",
clusterName: "test-cluster1.20",
wantErr: errors.New("test-cluster1.20 is not a valid cluster name, cluster names must start with lowercase/uppercase letters and can include numbers and dashes. For instance 'testCluster-123' is a valid name but '123testCluster' is not. "),
},
{
name: "FailureFirstCharNumeric",
clusterName: "123test-Cluster",
wantErr: errors.New("123test-Cluster is not a valid cluster name, cluster names must start with lowercase/uppercase letters and can include numbers and dashes. For instance 'testCluster-123' is a valid name but '123testCluster' is not. "),
},
{
name: "SuccessUpperCaseChars",
clusterName: "test-Cluster",
wantErr: nil,
},
{
name: "SuccessLowerCase",
clusterName: "test-cluster",
wantErr: nil,
},
{
name: "SuccessLowerCaseDashNumeric",
clusterName: "test-cluster123",
wantErr: nil,
},
{
name: "SuccessLowerCaseNumeric",
clusterName: "test123cluster",
wantErr: nil,
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
got := ValidateClusterName(tc.clusterName)
if !reflect.DeepEqual(tc.wantErr, got) {
t.Errorf("%v got = %v, want %v", tc.name, got, tc.wantErr)
}
})
}
}
func TestClusterNameLength(t *testing.T) {
tests := []struct {
clusterName, name string
wantErr error
}{
{
name: "SuccessClusterNameLength",
clusterName: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm",
wantErr: nil,
},
{
name: "FailureClusterNameLength",
clusterName: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345",
wantErr: errors.New("number of characters in qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345 should be less than 81"),
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
got := ValidateClusterNameLength(tc.clusterName)
if !reflect.DeepEqual(tc.wantErr, got) {
t.Errorf("%v got = %v, want %v", tc.name, got, tc.wantErr)
}
})
}
}
func TestValidateExternalEtcdSupport(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
wantCluster *Cluster
wantErr bool
}{
{
name: "tinkerbell config without external etcd",
wantCluster: &Cluster{
Spec: ClusterSpec{
DatacenterRef: Ref{
Kind: TinkerbellDatacenterKind,
Name: "eksa-unit-test",
},
},
},
wantErr: false,
},
{
name: "tinkerbell config with external etcd",
wantCluster: &Cluster{
Spec: ClusterSpec{
ExternalEtcdConfiguration: &ExternalEtcdConfiguration{Count: 1},
DatacenterRef: Ref{
Kind: TinkerbellDatacenterKind,
Name: "eksa-unit-test",
},
},
},
wantErr: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
got := validateExternalEtcdSupport(tc.wantCluster)
if tc.wantErr {
g.Expect(got).To(MatchError(ContainSubstring("external etcd configuration is unsupported")))
} else {
g.Expect(got).To(Succeed())
}
})
}
}
func TestGetAndValidateClusterConfig(t *testing.T) {
tests := []struct {
testName string
fileName string
wantCluster *Cluster
wantErr bool
}{
{
testName: "file doesn't exist",
fileName: "testdata/fake_file.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "not parseable file",
fileName: "testdata/not_parseable_cluster.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "valid 1.18",
fileName: "testdata/cluster_1_18.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube118,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
},
},
wantErr: false,
},
{
testName: "valid 1.19",
fileName: "testdata/cluster_1_19.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "valid with extra delimiters",
fileName: "testdata/cluster_extra_delimiters.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "valid 1.20",
fileName: "testdata/cluster_1_20.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube120,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "Invalid CloudStack 1.25",
fileName: "testdata/cluster_1_25_cloudstack.yaml",
wantErr: true,
},
{
testName: "namespace mismatch between cluster and datacenter",
fileName: "cluster_1_20_namespace_mismatch_between_cluster_and_datacenter.yaml",
wantErr: true,
},
{
testName: "namespace mismatch between cluster and machineconfigs",
fileName: "cluster_1_20_namespace_mismatch_between_cluster_and_machineconfigs",
wantErr: true,
},
{
testName: "valid 1.20 with non eksa resources",
fileName: "testdata/cluster_1_20_with_non_eksa_resources.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube120,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "valid different machine configs",
fileName: "testdata/cluster_different_machine_configs.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "with valid GitOps",
fileName: "testdata/cluster_1_19_gitops.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
GitOpsRef: &Ref{
Kind: "GitOpsConfig",
Name: "test-gitops",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "with GitOps branch valid",
fileName: "testdata/cluster_1_19_gitops_branch.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
GitOpsRef: &Ref{
Kind: "GitOpsConfig",
Name: "test-gitops",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "with valid ip proxy configuration",
fileName: "testdata/cluster_valid_proxy.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
ProxyConfiguration: &ProxyConfiguration{
HttpProxy: "http://0.0.0.0:1",
HttpsProxy: "0.0.0.0:1",
NoProxy: []string{"localhost"},
},
},
},
wantErr: false,
},
{
testName: "with valid domain name proxy configuration",
fileName: "testdata/cluster_valid_domainname_proxy.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
ProxyConfiguration: &ProxyConfiguration{
HttpProxy: "http://google.com:1",
HttpsProxy: "google.com:1",
NoProxy: []string{"localhost"},
},
},
},
wantErr: false,
},
{
testName: "valid different tainted machine configs",
fileName: "testdata/cluster_valid_taints_multiple_worker_node_groups.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{
{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoSchedule,
},
},
},
{
Name: "md-1",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectPreferNoSchedule,
},
},
},
},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "with no worker node groups",
fileName: "testdata/cluster_invalid_no_worker_node_groups.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with multiple worker node groups",
fileName: "testdata/cluster_invalid_multiple_worker_node_groups.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with invalid worker node group taints",
fileName: "testdata/cluster_invalid_taints.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with GitOps branch invalid",
fileName: "testdata/cluster_1_19_gitops_invalid_branch.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with gitops invalid repo name",
fileName: "testdata/cluster_1_19_gitops_invalid_repo.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "Empty Git Provider",
fileName: "testdata/cluster_invalid_gitops_empty_gitprovider.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "Invalid Git Provider",
fileName: "testdata/cluster_invalid_gitops_invalid_gitprovider.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "Empty Git Repository",
fileName: "testdata/cluster_invalid_gitops_empty_gitrepo.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "Git Repository not set",
fileName: "testdata/cluster_invalid_gitops_unset_gitrepo.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "invalid kind",
fileName: "testdata/cluster_invalid_kinds.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "invalid cluster name",
fileName: "testdata/cluster_invalid_cluster_name.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "even controlPlaneReplicas",
fileName: "testdata/cluster_even_control_plane_replicas.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "even unstacked etcd replicas",
fileName: "testdata/unstacked_etcd_even_replicas.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "empty identity providers",
fileName: "testdata/cluster_invalid_empty_identity_providers.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "extra identity providers",
fileName: "testdata/cluster_invalid_extra_identity_providers.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "non oidc identity provider",
fileName: "testdata/cluster_invalid_non_oidc_identity_providers.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with invalid proxy ip configuration",
fileName: "testdata/cluster_invalid_proxy_ip.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with invalid proxy port configuration",
fileName: "testdata/cluster_invalid_proxy_port.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with invalid proxy missing http proxy",
fileName: "testdata/cluster_invalid_missing_http_proxy.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with invalid proxy missing https proxy",
fileName: "testdata/cluster_invalid_missing_https_proxy.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with empty CNI",
fileName: "testdata/cluster_empty_cni.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "with not supported CNI",
fileName: "testdata/cluster_not_supported_cni.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "tinkerbell without worker nodes",
fileName: "testdata/tinkerbell_cluster_without_worker_nodes.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "single-node",
},
Spec: ClusterSpec{
KubernetesVersion: Kube123,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 1,
Endpoint: &Endpoint{
Host: "10.80.8.90",
},
MachineGroupRef: &Ref{
Kind: TinkerbellMachineConfigKind,
Name: "single-node-cp",
},
},
WorkerNodeGroupConfigurations: nil,
DatacenterRef: Ref{
Kind: TinkerbellDatacenterKind,
Name: "single-node",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
ManagementCluster: ManagementCluster{
Name: "single-node",
},
},
},
wantErr: false,
},
{
testName: "tinkerbell 1.21 cluster without worker nodes",
fileName: "testdata/tinkerbell_121cluster_without_worker_nodes.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "nontinkerbell datacenter without worker nodes",
fileName: "testdata/vsphere_cluster_without_worker_nodes.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "without worker nodes but has control plane taints",
fileName: "testdata/cluster_without_worker_nodes_has_cp_taints.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "Invalid registry",
fileName: "testdata/invalid_registry.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "valid package config",
fileName: "testdata/cluster_package_configuration.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube120,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
Packages: &PackageConfiguration{
Disable: true,
},
},
},
wantErr: false,
},
{
testName: "invalid controller package configuration",
fileName: "testdata/cluster_package_configuration_invalid.yaml",
wantCluster: nil,
wantErr: true,
},
{
testName: "invalid package cronjob",
fileName: "testdata/cluster_package_cronjob_invalid.yaml",
wantCluster: nil,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
got, err := GetAndValidateClusterConfig(tt.fileName)
if (err != nil) != tt.wantErr {
t.Fatalf("GetClusterConfig() error = %v, wantErr %v", err, tt.wantErr)
}
if !reflect.DeepEqual(got, tt.wantCluster) {
t.Fatalf("GetClusterConfig() = %#v, want %#v", got, tt.wantCluster)
}
})
}
}
func TestGetClusterConfig(t *testing.T) {
tests := []struct {
testName string
fileName string
wantCluster *Cluster
wantErr bool
}{
{
testName: "valid 1.18",
fileName: "testdata/cluster_1_18.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube118,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "valid 1.19",
fileName: "testdata/cluster_1_19.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "valid with extra delimiters",
fileName: "testdata/cluster_extra_delimiters.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "valid 1.20",
fileName: "testdata/cluster_1_20.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube120,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
got, err := GetClusterConfig(tt.fileName)
if (err != nil) != tt.wantErr {
t.Fatalf("GetClusterConfig() error = %v, wantErr %v", err, tt.wantErr)
}
if !reflect.DeepEqual(got, tt.wantCluster) {
t.Fatalf("GetClusterConfig() = %#v, want %#v", got, tt.wantCluster)
}
})
}
}
func TestParseClusterConfig(t *testing.T) {
type args struct {
fileName string
clusterConfig KindAccessor
}
tests := []struct {
name string
args args
matchError error
wantErr bool
wantCluster *Cluster
}{
{
name: "Good cluster config parse",
args: args{
fileName: "testdata/cluster_vsphere.yaml",
clusterConfig: &Cluster{},
},
wantErr: false,
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNI: Cilium,
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
},
{
name: "Invalid data type",
args: args{
fileName: "testdata/not_parseable_cluster.yaml",
clusterConfig: &Cluster{},
},
wantErr: true,
matchError: fmt.Errorf("cannot unmarshal string into Go struct field WorkerNodeGroupConfiguration.spec.workerNodeGroupConfigurations.count of type int"),
},
{
name: "Incorrect indentation",
args: args{
fileName: "testdata/incorrect_indentation.yaml",
clusterConfig: &Cluster{},
},
wantErr: true,
matchError: fmt.Errorf("converting YAML to JSON: yaml: line 12: did not find expected key"),
},
{
name: "Invalid key",
args: args{
fileName: "testdata/invalid_key.yaml",
clusterConfig: &Cluster{},
},
wantErr: true,
matchError: fmt.Errorf("unmarshaling JSON: while decoding JSON: json: unknown field \"registryMirro rConfiguration\""),
},
{
name: "Invalid yaml",
args: args{
fileName: "testdata/invalid_format.yaml",
clusterConfig: &Cluster{},
},
wantErr: true,
matchError: fmt.Errorf("unable to parse testdata/invalid_format.yaml file: error converting YAML to JSON: yaml: did not find expected node content"),
},
{
name: "Invalid spec field",
args: args{
fileName: "testdata/invalid_spec_field.yaml",
clusterConfig: &Cluster{},
},
wantErr: true,
matchError: fmt.Errorf("unmarshaling JSON: while decoding JSON: json: unknown field \"invalidField\""),
},
{
name: "Cluster definition at the end",
args: args{
fileName: "testdata/cluster_definition_at_the_end.yaml",
clusterConfig: &Cluster{},
},
wantErr: false,
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNI: Cilium,
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := ParseClusterConfig(tt.args.fileName, tt.args.clusterConfig)
if (err != nil) != tt.wantErr {
t.Errorf("ParseClusterConfig() error = %v, wantErr %v", err, tt.wantErr)
}
if !tt.wantErr && !reflect.DeepEqual(tt.args.clusterConfig, tt.wantCluster) {
t.Fatalf("GetClusterConfig() = %#v, want %#v", tt.args.clusterConfig, tt.wantCluster)
}
if tt.wantErr && !strings.Contains(err.Error(), tt.matchError.Error()) {
t.Errorf("ParseClusterConfig() error = %v, wantErr %v err %v", err, tt.wantErr, tt.matchError)
}
})
}
}
func TestCluster_PauseReconcile(t *testing.T) {
tests := []struct {
name string
want string
pause bool
}{
{
name: "pause should set pause annotation",
want: "true",
pause: true,
},
{
name: "pause should set pause annotation",
pause: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster_test",
Annotations: map[string]string{},
},
}
if tt.pause {
c.PauseReconcile()
val, ok := c.Annotations["anywhere.eks.amazonaws.com/paused"]
if ok && val != tt.want {
t.Errorf("expected value on annotation is not set got=%s, want=%s", val, tt.want)
}
if !ok {
t.Errorf("pause annotation is not set")
}
}
if !tt.pause {
if _, ok := c.Annotations["anywhere.eks.amazonaws.com/paused"]; ok {
t.Errorf("pause annotation is shouldn't be set")
}
}
})
}
}
func TestCluster_IsReconcilePaused(t *testing.T) {
tests := []struct {
name string
want bool
pause bool
}{
{
name: "reconcile is paused",
want: true,
pause: true,
},
{
name: "reconcile is not paused",
pause: false,
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster_test",
Annotations: map[string]string{},
},
}
if tt.pause {
c.PauseReconcile()
}
if got := c.IsReconcilePaused(); got != tt.want {
t.Errorf("IsReconcilePaused() = %v, want %v", got, tt.want)
}
})
}
}
func TestGitOpsEquals(t *testing.T) {
tests := []struct {
name string
want bool
prev *GitOpsConfigSpec
new *GitOpsConfigSpec
}{
{
name: "previous and new == nil",
want: true,
prev: nil,
new: nil,
},
{
name: "previous == nil",
want: false,
prev: nil,
new: &GitOpsConfigSpec{},
},
{
name: "previous == nil",
want: false,
prev: &GitOpsConfigSpec{},
new: nil,
},
{
name: "previous == new",
want: true,
prev: &GitOpsConfigSpec{
Flux: Flux{
Github: Github{
Owner: "owner",
Repository: "repo",
FluxSystemNamespace: "namespace",
Branch: "main",
ClusterConfigPath: "path/test",
Personal: false,
},
},
},
new: &GitOpsConfigSpec{
Flux: Flux{
Github: Github{
Owner: "owner",
Repository: "repo",
FluxSystemNamespace: "namespace",
Branch: "main",
ClusterConfigPath: "path/test",
Personal: false,
},
},
},
},
{
name: "previous != new",
want: false,
prev: &GitOpsConfigSpec{Flux: Flux{
Github: Github{
Owner: "owner",
Repository: "repo",
FluxSystemNamespace: "namespace",
Branch: "main",
ClusterConfigPath: "path/test",
Personal: false,
},
}},
new: &GitOpsConfigSpec{
Flux: Flux{
Github: Github{
Owner: "owner",
Repository: "new-repo",
FluxSystemNamespace: "namespace",
Branch: "main",
ClusterConfigPath: "path/test",
Personal: false,
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.want != tt.prev.Equal(tt.new) {
t.Errorf("GitOps %+v should be equals to %+v", tt.prev, tt.new)
}
})
}
}
func TestEndPointEquals(t *testing.T) {
tests := []struct {
name string
datacenterRefKind string
want bool
prev *Endpoint
new *Endpoint
}{
{
name: "previous and new == nil",
datacenterRefKind: VSphereDatacenterKind,
want: true,
prev: nil,
new: nil,
},
{
name: "previous == nil",
datacenterRefKind: VSphereDatacenterKind,
want: false,
prev: nil,
new: &Endpoint{},
},
{
name: "new == nil",
datacenterRefKind: VSphereDatacenterKind,
want: false,
prev: &Endpoint{},
new: nil,
},
{
name: "previous == new",
datacenterRefKind: VSphereDatacenterKind,
want: true,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host"},
},
{
name: "previous != new",
datacenterRefKind: VSphereDatacenterKind,
want: false,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "new-host"},
},
{
name: "same host, no port",
datacenterRefKind: CloudStackDatacenterKind,
want: true,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host"},
},
{
name: "same host, same default port",
want: true,
datacenterRefKind: CloudStackDatacenterKind,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host:6443"},
},
{
name: "same host, same custom port",
datacenterRefKind: CloudStackDatacenterKind,
want: true,
prev: &Endpoint{Host: "host:6442"},
new: &Endpoint{Host: "host:6442"},
},
{
name: "different host, no port",
datacenterRefKind: CloudStackDatacenterKind,
want: false,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "new-host"},
},
{
name: "different host, different port",
datacenterRefKind: CloudStackDatacenterKind,
want: false,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "new-host:6442"},
},
{
name: "same host, old custom port, new no port",
datacenterRefKind: CloudStackDatacenterKind,
want: false,
prev: &Endpoint{Host: "host:6442"},
new: &Endpoint{Host: "host"},
},
{
name: "same host, old default port, new no port",
datacenterRefKind: CloudStackDatacenterKind,
want: true,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host"},
},
{
name: "same host, old no port, new custom port",
datacenterRefKind: CloudStackDatacenterKind,
want: false,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host:6442"},
},
{
name: "same host, old no port, new default port",
datacenterRefKind: CloudStackDatacenterKind,
want: true,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host:6443"},
},
{
name: "same host, old default port, new no port",
datacenterRefKind: CloudStackDatacenterKind,
want: true,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host"},
},
{
name: "invalid host",
datacenterRefKind: CloudStackDatacenterKind,
want: false,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host::"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.want != tt.prev.Equal(tt.new, tt.datacenterRefKind) {
t.Errorf("Endpoint %+v should be equals to %+v", tt.prev, tt.new)
}
})
}
}
func TestCloudStackEndPointEquals(t *testing.T) {
tests := []struct {
name string
want bool
prev *Endpoint
new *Endpoint
}{
{
name: "previous and new == nil",
want: true,
prev: nil,
new: nil,
},
{
name: "previous == nil",
want: false,
prev: nil,
new: &Endpoint{},
},
{
name: "previous == nil",
want: false,
prev: &Endpoint{},
new: nil,
},
{
name: "same host, no port",
want: true,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host"},
},
{
name: "same host, same default port",
want: true,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host:6443"},
},
{
name: "same host, same custom port",
want: true,
prev: &Endpoint{Host: "host:6442"},
new: &Endpoint{Host: "host:6442"},
},
{
name: "different host, no port",
want: false,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "new-host"},
},
{
name: "different host, different port",
want: false,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "new-host:6442"},
},
{
name: "same host, old custom port, new no port",
want: false,
prev: &Endpoint{Host: "host:6442"},
new: &Endpoint{Host: "host"},
},
{
name: "same host, old default port, new no port",
want: true,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host"},
},
{
name: "same host, old no port, new custom port",
want: false,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host:6442"},
},
{
name: "same host, old no port, new default port",
want: true,
prev: &Endpoint{Host: "host"},
new: &Endpoint{Host: "host:6443"},
},
{
name: "same host, old default port, new no port",
want: true,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host"},
},
{
name: "invalid host",
want: false,
prev: &Endpoint{Host: "host:6443"},
new: &Endpoint{Host: "host::"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.want != tt.prev.CloudStackEqual(tt.new) {
t.Errorf("Endpoint %+v should be equals to %+v", tt.prev, tt.new)
}
})
}
}
func TestProxyConfigurationEquals(t *testing.T) {
tests := []struct {
name string
want bool
prev *ProxyConfiguration
new *ProxyConfiguration
}{
{
name: "previous and new == nil",
want: true,
prev: nil,
new: nil,
},
{
name: "previous == nil",
want: false,
prev: nil,
new: &ProxyConfiguration{},
},
{
name: "previous == nil",
want: false,
prev: &ProxyConfiguration{},
new: nil,
},
{
name: "previous == new, all exists",
want: true,
prev: &ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{
"noproxy1",
"noproxy2",
},
},
new: &ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{
"noproxy1",
"noproxy2",
},
},
},
{
name: "previous == new, only httpproxy",
want: true,
prev: &ProxyConfiguration{HttpProxy: "httpproxy"},
new: &ProxyConfiguration{HttpProxy: "httpproxy"},
},
{
name: "previous == new, only noproxy, order diff",
want: true,
prev: &ProxyConfiguration{NoProxy: []string{
"noproxy1",
"noproxy2",
"noproxy3",
}},
new: &ProxyConfiguration{NoProxy: []string{
"noproxy2",
"noproxy3",
"noproxy1",
}},
},
{
name: "previous != new, httpsproxy diff",
want: false,
prev: &ProxyConfiguration{HttpsProxy: "httpsproxy1"},
new: &ProxyConfiguration{HttpsProxy: "httpsproxy2"},
},
{
name: "previous != new, noproxy diff val",
want: false,
prev: &ProxyConfiguration{
HttpProxy: "",
NoProxy: []string{
"noproxy1",
"noproxy2",
},
},
new: &ProxyConfiguration{
HttpProxy: "",
NoProxy: []string{
"noproxy2",
"noproxy3",
},
},
},
{
name: "previous != new, noproxy diff one empty",
want: false,
prev: &ProxyConfiguration{
NoProxy: []string{
"noproxy1",
"noproxy2",
},
},
new: &ProxyConfiguration{},
},
{
name: "previous != new, noproxy diff length",
want: false,
prev: &ProxyConfiguration{
NoProxy: []string{
"noproxy1",
"noproxy2",
},
},
new: &ProxyConfiguration{
NoProxy: []string{
"noproxy1",
"noproxy2",
"noproxy3",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.want != tt.prev.Equal(tt.new) {
t.Errorf("ProxyConfiguration %+v should be equals to %+v", tt.prev, tt.new)
}
})
}
}
func TestClusterNetworkEquals(t *testing.T) {
tests := []struct {
name string
want bool
prev *ClusterNetwork
new *ClusterNetwork
}{
{
name: "previous and new == nil",
want: true,
prev: nil,
new: nil,
},
{
name: "previous == nil",
want: false,
prev: nil,
new: &ClusterNetwork{},
},
{
name: "previous == nil",
want: false,
prev: &ClusterNetwork{},
new: nil,
},
{
name: "previous == new, all exists",
want: true,
prev: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/5",
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
"1.2.3.4/8",
},
},
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/5",
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
"1.2.3.4/8",
},
},
},
},
{
name: "previous == new, pods empty",
want: true,
prev: &ClusterNetwork{
Services: Services{
CidrBlocks: []string{},
},
},
new: &ClusterNetwork{
Pods: Pods{},
Services: Services{},
},
},
{
name: "previous == new, order diff",
want: true,
prev: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/5",
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
"1.2.3.4/8",
},
},
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/6",
"1.2.3.4/5",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/8",
"1.2.3.4/7",
},
},
},
},
{
name: "previous != new, pods diff",
want: false,
prev: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/5",
"1.2.3.4/6",
},
},
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
},
},
{
name: "previous != new, services diff, one empty",
want: false,
prev: &ClusterNetwork{},
new: &ClusterNetwork{
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
"1.2.3.4/8",
},
},
},
},
{
name: "previous != new, services diff, CidrBlocks empty",
want: false,
prev: &ClusterNetwork{
Services: Services{
CidrBlocks: []string{},
},
},
new: &ClusterNetwork{
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
"1.2.3.4/8",
},
},
},
},
{
name: "previous == new, same cni, older format",
want: true,
prev: &ClusterNetwork{
CNI: Cilium,
},
new: &ClusterNetwork{
CNI: Cilium,
},
},
{
name: "previous != new, diff CNI, older format",
want: false,
prev: &ClusterNetwork{
CNI: Kindnetd,
},
new: &ClusterNetwork{
CNI: Cilium,
},
},
{
name: "previous == new, same cni, diff format",
want: true,
prev: &ClusterNetwork{
CNI: Cilium,
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
},
{
name: "previous != new, same cni, diff format, diff cilium policy mode",
want: false,
prev: &ClusterNetwork{
CNI: Cilium,
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{PolicyEnforcementMode: "always"}},
},
},
{
name: "previous != new, different cni, different format",
want: false,
prev: &ClusterNetwork{
CNI: Kindnetd,
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
},
{
name: "previous != new, new cniConfig format, diff cni",
want: false,
prev: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Kindnetd: &KindnetdConfig{}},
},
},
{
name: "previous == new, new cniConfig format, same cni",
want: true,
prev: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
},
{
name: "previous != new, new cniConfig format, same cilium cni, diff configuration",
want: false,
prev: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{PolicyEnforcementMode: "always"}},
},
new: &ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{PolicyEnforcementMode: "default"}},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.want != tt.new.Equal(tt.prev) {
t.Errorf("ClusterNetwork %+v should be equals to %+v", tt.prev, tt.new)
}
})
}
}
func TestRefEquals(t *testing.T) {
tests := []struct {
name string
want bool
prev *Ref
new *Ref
}{
{
name: "previous and new == nil",
want: true,
prev: nil,
new: nil,
},
{
name: "previous == nil",
want: false,
prev: nil,
new: &Ref{},
},
{
name: "previous == nil",
want: false,
prev: &Ref{},
new: nil,
},
{
name: "previous == new",
want: true,
prev: &Ref{Kind: "kind", Name: "name"},
new: &Ref{Kind: "kind", Name: "name"},
},
{
name: "previous != new, val diff",
want: false,
prev: &Ref{Kind: "kind1", Name: "name1"},
new: &Ref{Kind: "kind2", Name: "name2"},
},
{
name: "previous != new, one missing kind",
want: false,
prev: &Ref{Name: "name"},
new: &Ref{Kind: "kind", Name: "name"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.want != tt.prev.Equal(tt.new) {
t.Errorf("Ref %+v should be equals to %+v", tt.prev, tt.new)
}
})
}
}
func TestValidateNetworking(t *testing.T) {
nodeCidrMaskSize := new(int)
*nodeCidrMaskSize = 28
tests := []struct {
name string
wantErr error
cluster *Cluster
}{
{
name: "both formats used",
wantErr: fmt.Errorf("invalid format for cni plugin: both old and new formats used, use only the CNIConfig field"),
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/8",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
CNI: Cilium,
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
},
},
},
},
{
name: "deprecated CNI field",
wantErr: nil,
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/8",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "no CNI plugin input",
wantErr: fmt.Errorf("cni not specified"),
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/8",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
CNI: "",
CNIConfig: nil,
},
},
},
},
{
name: "node cidr mask size valid",
wantErr: nil,
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/24",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "node cidr mask size invalid",
wantErr: fmt.Errorf("the size of pod subnet with mask 30 is smaller than or equal to the size of node subnet with mask 28"),
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/30",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "node cidr mask size invalid diff",
wantErr: fmt.Errorf("pod subnet mask (6) and node-mask (28) difference is greater than 16"),
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "both pods CIDR block and service CIDR block do not conflict with control plane endpoint",
wantErr: nil,
cluster: &Cluster{
Spec: ClusterSpec{
DatacenterRef: Ref{
Kind: SnowDatacenterKind,
},
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "192.168.1.10",
},
},
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"10.1.0.0/16",
},
},
Services: Services{
CidrBlocks: []string{
"10.96.0.0/12",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "invalid pods CIDR block",
wantErr: fmt.Errorf("invalid CIDR block format for Pods: {[1.2.3]}. Please specify a valid CIDR block for pod subnet"),
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/7",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "pods CIDR block conflicts with control plane endpoint",
wantErr: fmt.Errorf("control plane endpoint 192.168.1.10 conflicts with pods CIDR block 192.168.1.0/24"),
cluster: &Cluster{
Spec: ClusterSpec{
DatacenterRef: Ref{
Kind: SnowDatacenterKind,
},
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "192.168.1.10",
},
},
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"192.168.1.0/24",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "invalid services CIDR block",
wantErr: fmt.Errorf("invalid CIDR block for Services: {[1.2.3]}. Please specify a valid CIDR block for service subnet"),
cluster: &Cluster{
Spec: ClusterSpec{
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"1.2.3",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "services CIDR block conflicts with control plane endpoint",
wantErr: fmt.Errorf("control plane endpoint 192.168.1.10 conflicts with services CIDR block 192.168.1.0/24"),
cluster: &Cluster{
Spec: ClusterSpec{
DatacenterRef: Ref{
Kind: SnowDatacenterKind,
},
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "192.168.1.10",
},
},
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"192.168.1.0/24",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
{
name: "control plane endpoint is invalid",
wantErr: fmt.Errorf("control plane endpoint 192.168.1 is invalid"),
cluster: &Cluster{
Spec: ClusterSpec{
DatacenterRef: Ref{
Kind: SnowDatacenterKind,
},
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "192.168.1",
},
},
ClusterNetwork: ClusterNetwork{
Pods: Pods{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
Services: Services{
CidrBlocks: []string{
"10.1.0.0/24",
},
},
Nodes: &Nodes{
CIDRMaskSize: nodeCidrMaskSize,
},
CNI: Cilium,
CNIConfig: nil,
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := validateNetworking(tt.cluster)
if !reflect.DeepEqual(tt.wantErr, got) {
t.Errorf("%v got = %v, want %v", tt.name, got, tt.wantErr)
}
})
}
}
func TestValidateCNIConfig(t *testing.T) {
tests := []struct {
name string
wantErr error
clusterNetwork *ClusterNetwork
}{
{
name: "CNI plugin not specified",
wantErr: fmt.Errorf("validating cniConfig: no cni plugin specified"),
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{},
},
},
{
name: "multiple CNI plugins specified",
wantErr: fmt.Errorf("validating cniConfig: cannot specify more than one cni plugins"),
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{},
Kindnetd: &KindnetdConfig{},
},
},
},
{
name: "invalid cilium policy enforcement mode",
wantErr: fmt.Errorf("validating cniConfig: cilium policyEnforcementMode \"invalid\" not supported"),
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
PolicyEnforcementMode: "invalid",
},
},
},
},
{
name: "invalid cilium policy enforcement mode and > 1 plugins",
wantErr: fmt.Errorf("validating cniConfig: [cilium policyEnforcementMode \"invalid\" not supported, cannot specify more than one cni plugins]"),
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
PolicyEnforcementMode: "invalid",
},
Kindnetd: &KindnetdConfig{},
},
},
},
{
name: "valid cilium policy enforcement mode",
wantErr: nil,
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
PolicyEnforcementMode: "default",
},
},
},
},
{
name: "valid cilium policy enforcement mode",
wantErr: nil,
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
PolicyEnforcementMode: "always",
},
},
},
},
{
name: "valid cilium policy enforcement mode",
wantErr: nil,
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
PolicyEnforcementMode: "never",
},
},
},
},
{
name: "CiliumSkipUpgradeWithoutOtherFields",
wantErr: nil,
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
},
},
},
{
name: "CiliumSkipUpgradeWithOtherFields",
wantErr: fmt.Errorf("validating cniConfig: when using skipUpgrades for cilium all " +
"other fields must be empty"),
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
SkipUpgrade: ptr.Bool(true),
PolicyEnforcementMode: "never",
},
},
},
},
{
name: "CiliumSkipUpgradeExplicitFalseWithOtherFields",
clusterNetwork: &ClusterNetwork{
CNIConfig: &CNIConfig{
Cilium: &CiliumConfig{
SkipUpgrade: ptr.Bool(false),
PolicyEnforcementMode: "never",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := validateCNIConfig(tt.clusterNetwork.CNIConfig)
if !reflect.DeepEqual(tt.wantErr, got) {
t.Errorf("%v got = %v, want %v", tt.name, got, tt.wantErr)
}
})
}
}
func TestValidateMirrorConfig(t *testing.T) {
tests := []struct {
name string
wantErr string
cluster *Cluster
}{
{
name: "registry mirror not specified",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: nil,
},
},
},
{
name: "endpoint not specified",
wantErr: "no value set for RegistryMirrorConfiguration.Endpoint",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "",
},
},
},
},
{
name: "invalid port",
wantErr: "registry mirror port 65536 is invalid",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "65536",
},
},
},
},
{
name: "multiple mappings for curated packages",
wantErr: "only one registry mirror for curated packages is suppported",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "30003",
OCINamespaces: []OCINamespace{
{
Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com",
Namespace: "",
},
{
Registry: "783794618700.dkr.ecr.us-east-1.amazonaws.com",
Namespace: "",
},
},
},
},
},
},
{
name: "one registry in OCINamespace but not public.ecr.aws",
wantErr: "registry must be public.ecr.aws when only one mapping is specified",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "30003",
OCINamespaces: []OCINamespace{
{
Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com",
Namespace: "curated-packages",
},
},
},
},
},
},
{
name: "empty registry in OCINamespace",
wantErr: "registry can't be set to empty in OCINamespaces",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "30003",
OCINamespaces: []OCINamespace{
{
Registry: "",
Namespace: "",
},
},
},
},
},
},
{
name: "insecureSkipVerify on snow provider",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
InsecureSkipVerify: true,
},
DatacenterRef: Ref{
Kind: SnowDatacenterKind,
},
},
},
},
{
name: "insecureSkipVerify on nutanix provider",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
InsecureSkipVerify: true,
},
DatacenterRef: Ref{
Kind: NutanixDatacenterKind,
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := validateMirrorConfig(tt.cluster)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestValidateAutoscalingConfig(t *testing.T) {
tests := []struct {
name string
wantErr string
workerNodeGroupConfiguration *WorkerNodeGroupConfiguration
}{
{
name: "autoscaling config nil",
wantErr: "",
workerNodeGroupConfiguration: nil,
},
{
name: "autoscaling config valid",
wantErr: "",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(2),
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: 1,
MaxCount: 2,
},
},
},
{
name: "negative min count",
wantErr: "min count must be non negative",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: -1,
},
},
},
{
name: "min count > max count",
wantErr: "min count must be no greater than max count",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: 2,
MaxCount: 1,
},
},
},
{
name: "count < min count",
wantErr: "min count must be less than or equal to count",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: 2,
MaxCount: 3,
},
},
},
{
name: "count > max count",
wantErr: "max count must be greater than or equal to count",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(4),
AutoScalingConfiguration: &AutoScalingConfiguration{
MinCount: 2,
MaxCount: 3,
},
},
},
{
name: "count < 0 with nil autoscaling",
wantErr: "worker node count must be zero or greater if autoscaling is not enabled",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(-1),
},
},
{
name: "nil autoscaling",
wantErr: "",
workerNodeGroupConfiguration: &WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := validateAutoscalingConfig(tt.workerNodeGroupConfiguration)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestClusterRegistryAuth(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want bool
}{
{
name: "with registry mirror auth",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
Authenticate: true,
},
},
},
want: true,
},
{
name: "without registry mirror auth",
cluster: &Cluster{
Spec: ClusterSpec{
RegistryMirrorConfiguration: &RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
},
},
},
want: false,
},
{
name: "without registry mirror",
cluster: &Cluster{},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster.RegistryAuth()).To(Equal(tt.want))
})
}
}
func TestClusterProxyConfiguration(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want map[string]string
}{
{
name: "with proxy configuration",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "1.2.3.4",
},
},
ProxyConfiguration: &ProxyConfiguration{
HttpProxy: "test-http",
HttpsProxy: "test-https",
NoProxy: []string{"test-noproxy-1", "test-noproxy-2", "test-noproxy-3"},
},
},
},
want: map[string]string{
"HTTP_PROXY": "test-http",
"HTTPS_PROXY": "test-https",
"NO_PROXY": "test-noproxy-1,test-noproxy-2,test-noproxy-3,1.2.3.4",
},
},
{
name: "without proxy configuration",
cluster: &Cluster{},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster.ProxyConfiguration()).To(Equal(tt.want))
})
}
}
func TestValidateControlPlaneEndpoint(t *testing.T) {
tests := []struct {
name string
wantErr string
cluster *Cluster
}{
{
name: "docker provider - control plane endpoint is not set",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
DatacenterRef: Ref{
Kind: DockerDatacenterKind,
},
ControlPlaneConfiguration: ControlPlaneConfiguration{},
},
},
},
{
name: "control plane ip is not set",
wantErr: "cluster controlPlaneConfiguration.Endpoint.Host is not set or is empty",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "",
},
},
},
},
},
{
name: "control plane ip is set",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
Endpoint: &Endpoint{
Host: "test-ip",
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := validateControlPlaneEndpoint(tt.cluster)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestValidateCPUpgradeRolloutStrategy(t *testing.T) {
tests := []struct {
name string
wantErr string
cluster *Cluster
}{
{
name: "rolling upgrade strategy invalid",
wantErr: "ControlPlaneConfiguration: only 'RollingUpdate' supported for upgrade rollout strategy type",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "NotRollingUpdate"},
},
},
},
},
{
name: "rolling upgrade knobs not specified",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate"},
},
},
},
},
{
name: "rolling upgrade invalid",
wantErr: "maxSurge for control plane must be 0 or 1",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: ControlPlaneRollingUpdateParams{MaxSurge: 2}},
},
},
},
},
{
name: "rolling upgrade knobs valid value 0",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: ControlPlaneRollingUpdateParams{MaxSurge: 0}},
},
},
},
},
{
name: "rolling upgrade knobs valid value 1",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: ControlPlaneRollingUpdateParams{MaxSurge: 1}},
},
},
},
},
{
name: "rolling upgrade knobs invalid value -1",
wantErr: "ControlPlaneConfiguration: maxSurge for control plane cannot be a negative value",
cluster: &Cluster{
Spec: ClusterSpec{
ControlPlaneConfiguration: ControlPlaneConfiguration{
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: ControlPlaneRollingUpdateParams{MaxSurge: -1}},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := validateCPUpgradeRolloutStrategy(tt.cluster)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestValidateMDUpgradeRolloutStrategy(t *testing.T) {
tests := []struct {
name string
wantErr string
cluster *Cluster
}{
{
name: "rolling upgrade strategy invalid",
wantErr: "WorkerNodeGroupConfiguration: only 'RollingUpdate' supported for upgrade rollout strategy type",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "NotRollingUpdate"},
}},
},
},
},
{
name: "rolling upgrade knobs not specified",
wantErr: "WorkerNodeGroupConfiguration: maxSurge and maxUnavailable not specified or are 0. maxSurge and maxUnavailable cannot both be 0",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate"},
}},
},
},
},
{
name: "rolling upgrade invalid",
wantErr: "WorkerNodeGroupConfiguration: maxSurge and maxUnavailable not specified or are 0. maxSurge and maxUnavailable cannot both be 0",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 0, MaxUnavailable: 0}},
}},
},
},
},
{
name: "rolling upgrade knobs valid value 0,1",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 0, MaxUnavailable: 1}},
}},
},
},
},
{
name: "rolling upgrade knobs valid value 1,0",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 1, MaxUnavailable: 0}},
}},
},
},
},
{
name: "rolling upgrade knobs valid value 5,0",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 5, MaxUnavailable: 0}},
}},
},
},
},
{
name: "rolling upgrade knobs valid value 3,1",
wantErr: "",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 3, MaxUnavailable: 1}},
}},
},
},
},
{
name: "rolling upgrade knobs invalid values 3,-1",
wantErr: "WorkerNodeGroupConfiguration: maxSurge and maxUnavailable values cannot be negative",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 3, MaxUnavailable: -1}},
}},
},
},
},
{
name: "rolling upgrade knobs invalid values -3,1",
wantErr: "WorkerNodeGroupConfiguration: maxSurge and maxUnavailable values cannot be negative",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: -3, MaxUnavailable: 1}},
}},
},
},
},
{
name: "rolling upgrade knobs invalid values -3,-1",
wantErr: "WorkerNodeGroupConfiguration: maxSurge and maxUnavailable values cannot be negative",
cluster: &Cluster{
Spec: ClusterSpec{
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{{
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: -3, MaxUnavailable: -1}},
}},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := validateMDUpgradeRolloutStrategy(&tt.cluster.Spec.WorkerNodeGroupConfigurations[0])
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestGetClusterDefaultKubernetesVersion(t *testing.T) {
g := NewWithT(t)
g.Expect(GetClusterDefaultKubernetesVersion()).To(Equal(Kube127))
}
func TestClusterWorkerNodeConfigCount(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want []WorkerNodeGroupConfiguration
}{
{
name: "with worker node config count",
cluster: &Cluster{
Spec: ClusterSpec{},
},
want: []WorkerNodeGroupConfiguration{
{
Name: "",
Count: ptr.Int(5),
AutoScalingConfiguration: nil,
MachineGroupRef: nil,
Taints: nil,
Labels: nil,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cg := NewClusterGenerate("test-cluster", WorkerNodeConfigCount(5))
g := NewWithT(t)
g.Expect(cg.Spec.WorkerNodeGroupConfigurations).To(Equal(tt.want))
})
}
}
func TestClusterCPUpgradeRolloutStrategyNil(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want ControlPlaneConfiguration
}{
{
name: "with control plane rollout upgrade strategy nil",
cluster: &Cluster{
Spec: ClusterSpec{},
},
want: ControlPlaneConfiguration{
Endpoint: nil,
Count: 1,
MachineGroupRef: nil,
Taints: nil,
Labels: nil,
UpgradeRolloutStrategy: nil,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cg := NewClusterGenerate("test-cluster", ControlPlaneConfigCount(1))
g := NewWithT(t)
g.Expect(cg.Spec.ControlPlaneConfiguration).To(Equal(tt.want))
})
}
}
func TestClusterCPUpgradeRolloutStrategyNotNil(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want ControlPlaneConfiguration
}{
{
name: "with control plane rollout upgrade strategy non-nil",
cluster: &Cluster{
Spec: ClusterSpec{},
},
want: ControlPlaneConfiguration{
Endpoint: nil,
Count: 1,
MachineGroupRef: nil,
Taints: nil,
Labels: nil,
UpgradeRolloutStrategy: &ControlPlaneUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: ControlPlaneRollingUpdateParams{MaxSurge: 5}},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cg := NewClusterGenerate("test-cluster", ControlPlaneConfigCount(1), WithCPUpgradeRolloutStrategy(5, 2))
g := NewWithT(t)
g.Expect(cg.Spec.ControlPlaneConfiguration).To(Equal(tt.want))
})
}
}
func TestClusterMDUpgradeRolloutStrategyNil(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want []WorkerNodeGroupConfiguration
}{
{
name: "with md rollout upgrade strategy knobs not specified",
cluster: &Cluster{
Spec: ClusterSpec{},
},
want: []WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: nil,
Taints: nil,
Labels: nil,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cg := NewClusterGenerate("test-cluster", WorkerNodeConfigCount(1))
g := NewWithT(t)
g.Expect(cg.Spec.WorkerNodeGroupConfigurations).To(Equal(tt.want))
})
}
}
func TestClusterMDUpgradeRolloutStrategyNotNil(t *testing.T) {
tests := []struct {
name string
cluster *Cluster
want []WorkerNodeGroupConfiguration
}{
{
name: "with md rollout upgrade strategy",
cluster: &Cluster{
Spec: ClusterSpec{},
},
want: []WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: nil,
Taints: nil,
Labels: nil,
UpgradeRolloutStrategy: &WorkerNodesUpgradeRolloutStrategy{Type: "RollingUpdate", RollingUpdate: WorkerNodesRollingUpdateParams{MaxSurge: 5, MaxUnavailable: 2}},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cg := NewClusterGenerate("test-cluster", WorkerNodeConfigCount(1), WithWorkerMachineUpgradeRolloutStrategy(5, 2))
g := NewWithT(t)
g.Expect(cg.Spec.WorkerNodeGroupConfigurations).To(Equal(tt.want))
})
}
}
func TestCloudstackK8sVersion(t *testing.T) {
tests := []struct {
testName string
k8sVersion KubernetesVersion
wantErr error
}{
{
testName: "SuccessK8sVersion",
k8sVersion: Kube122,
wantErr: nil,
},
{
testName: "SuccessK8sVersion",
k8sVersion: Kube123,
wantErr: nil,
},
{
testName: "SuccessK8sVersion",
k8sVersion: Kube124,
wantErr: nil,
},
{
testName: "FailureK8sVersion",
k8sVersion: Kube125,
wantErr: errors.New("cloudstack provider does not support K8s version > 1.24"),
},
{
testName: "FailureK8sVersion",
k8sVersion: Kube126,
wantErr: errors.New("cloudstack provider does not support K8s version > 1.24"),
},
{
testName: "InvalidK8sVersion",
k8sVersion: "1",
wantErr: errors.New("converting kubeVersion 1 to semver invalid major version in semver 1.0: strconv.ParseUint: parsing \"\": invalid syntax"),
},
}
for _, tc := range tests {
t.Run(tc.testName, func(tt *testing.T) {
got := ValidateCloudStackK8sVersion(tc.k8sVersion)
if !reflect.DeepEqual(tc.wantErr, got) {
t.Errorf("%v got = %v, want %v", tc.testName, got, tc.wantErr)
}
})
}
}
| 3,502 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"net"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/semver"
)
const (
// PausedAnnotation is an annotation that can be applied to EKS-A cluster
// object to prevent a controller from processing a resource.
pausedAnnotation = "anywhere.eks.amazonaws.com/paused"
// ManagedByCLIAnnotation can be applied to an EKS-A Cluster to signal when the CLI is currently
// performing an operation so the controller should not take any action. When marked for deletion,
// the controller will remove the finalizer and let the cluster be deleted.
ManagedByCLIAnnotation = "anywhere.eks.amazonaws.com/managed-by-cli"
// ControlPlaneAnnotation is an annotation that can be applied to EKS-A machineconfig
// object to prevent a controller from making changes to that resource.
controlPlaneAnnotation = "anywhere.eks.amazonaws.com/control-plane"
clusterResourceType = "clusters.anywhere.eks.amazonaws.com"
// etcdAnnotation can be applied to EKS-A machineconfig CR for etcd, to prevent controller from making changes to it.
etcdAnnotation = "anywhere.eks.amazonaws.com/etcd"
// managementAnnotation points to the name of a management cluster
// cluster object.
managementAnnotation = "anywhere.eks.amazonaws.com/managed-by"
// defaultEksaNamespace is the default namespace for EKS-A resources when not specified.
defaultEksaNamespace = "default"
// ControlEndpointDefaultPort defaults cluster control plane endpoint port if not specified.
ControlEndpointDefaultPort = "6443"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ClusterSpec defines the desired state of Cluster.
type ClusterSpec struct {
KubernetesVersion KubernetesVersion `json:"kubernetesVersion,omitempty"`
ControlPlaneConfiguration ControlPlaneConfiguration `json:"controlPlaneConfiguration,omitempty"`
WorkerNodeGroupConfigurations []WorkerNodeGroupConfiguration `json:"workerNodeGroupConfigurations,omitempty"`
DatacenterRef Ref `json:"datacenterRef,omitempty"`
IdentityProviderRefs []Ref `json:"identityProviderRefs,omitempty"`
GitOpsRef *Ref `json:"gitOpsRef,omitempty"`
ClusterNetwork ClusterNetwork `json:"clusterNetwork,omitempty"`
// +kubebuilder:validation:Optional
ExternalEtcdConfiguration *ExternalEtcdConfiguration `json:"externalEtcdConfiguration,omitempty"`
ProxyConfiguration *ProxyConfiguration `json:"proxyConfiguration,omitempty"`
RegistryMirrorConfiguration *RegistryMirrorConfiguration `json:"registryMirrorConfiguration,omitempty"`
ManagementCluster ManagementCluster `json:"managementCluster,omitempty"`
PodIAMConfig *PodIAMConfig `json:"podIamConfig,omitempty"`
Packages *PackageConfiguration `json:"packages,omitempty"`
// BundlesRef contains a reference to the Bundles containing the desired dependencies for the cluster.
// DEPRECATED: Use EksaVersion instead.
BundlesRef *BundlesRef `json:"bundlesRef,omitempty"`
EksaVersion *EksaVersion `json:"eksaVersion,omitempty"`
}
// EksaVersion is the semver identifying the release of eks-a used to populate the cluster components.
type EksaVersion string
// Equal checks if two EksaVersions are equal.
func (n *EksaVersion) Equal(o *EksaVersion) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return *n == *o
}
// HasAWSIamConfig checks if AWSIamConfig is configured for the cluster.
func (c *Cluster) HasAWSIamConfig() bool {
for _, identityProvider := range c.Spec.IdentityProviderRefs {
if identityProvider.Kind == AWSIamConfigKind {
return true
}
}
return false
}
// IsPackagesEnabled checks if the user has opted out of curated packages
// installation.
func (c *Cluster) IsPackagesEnabled() bool {
return c.Spec.Packages == nil || !c.Spec.Packages.Disable
}
func (n *Cluster) Equal(o *Cluster) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if n.Spec.KubernetesVersion != o.Spec.KubernetesVersion {
return false
}
if !n.Spec.DatacenterRef.Equal(&o.Spec.DatacenterRef) {
return false
}
if !n.Spec.ControlPlaneConfiguration.Endpoint.Equal(o.Spec.ControlPlaneConfiguration.Endpoint, n.Spec.DatacenterRef.Kind) {
return false
}
if !n.Spec.ControlPlaneConfiguration.Equal(&o.Spec.ControlPlaneConfiguration) {
return false
}
if !WorkerNodeGroupConfigurationsSliceEqual(n.Spec.WorkerNodeGroupConfigurations, o.Spec.WorkerNodeGroupConfigurations) {
return false
}
if !RefSliceEqual(n.Spec.IdentityProviderRefs, o.Spec.IdentityProviderRefs) {
return false
}
if !n.Spec.GitOpsRef.Equal(o.Spec.GitOpsRef) {
return false
}
if !n.Spec.ClusterNetwork.Equal(&o.Spec.ClusterNetwork) {
return false
}
if !n.Spec.ExternalEtcdConfiguration.Equal(o.Spec.ExternalEtcdConfiguration) {
return false
}
if !n.Spec.ProxyConfiguration.Equal(o.Spec.ProxyConfiguration) {
return false
}
if !n.Spec.RegistryMirrorConfiguration.Equal(o.Spec.RegistryMirrorConfiguration) {
return false
}
if !n.Spec.Packages.Equal(o.Spec.Packages) {
return false
}
if !n.ManagementClusterEqual(o) {
return false
}
if !n.Spec.BundlesRef.Equal(o.Spec.BundlesRef) {
return false
}
if !n.Spec.EksaVersion.Equal(o.Spec.EksaVersion) {
return false
}
return true
}
func (n *Cluster) Validate() error {
return ValidateClusterConfigContent(n)
}
func (n *Cluster) SetDefaults() {
// TODO: move any defaults that can return error out of this package
// All the defaults here should be context unaware
if err := setClusterDefaults(n); err != nil {
logger.Error(err, "Failed to validate Cluster")
}
}
type ProxyConfiguration struct {
HttpProxy string `json:"httpProxy,omitempty"`
HttpsProxy string `json:"httpsProxy,omitempty"`
NoProxy []string `json:"noProxy,omitempty"`
}
func (n *ProxyConfiguration) Equal(o *ProxyConfiguration) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.HttpProxy == o.HttpProxy && n.HttpsProxy == o.HttpsProxy && SliceEqual(n.NoProxy, o.NoProxy)
}
// RegistryMirrorConfiguration defines the settings for image registry mirror.
type RegistryMirrorConfiguration struct {
// Endpoint defines the registry mirror endpoint to use for pulling images
Endpoint string `json:"endpoint,omitempty"`
// Port defines the port exposed for registry mirror endpoint
Port string `json:"port,omitempty"`
// OCINamespaces defines the mapping from an upstream registry to a local namespace where upstream
// artifacts are placed into
OCINamespaces []OCINamespace `json:"ociNamespaces,omitempty"`
// CACertContent defines the contents registry mirror CA certificate
CACertContent string `json:"caCertContent,omitempty"`
// Authenticate defines if registry requires authentication
Authenticate bool `json:"authenticate,omitempty"`
// InsecureSkipVerify skips the registry certificate verification.
// Only use this solution for isolated testing or in a tightly controlled, air-gapped environment.
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
}
// OCINamespace represents an entity in a local reigstry to group related images.
type OCINamespace struct {
// Name refers to the name of the upstream registry
Registry string `json:"registry"`
// Namespace refers to the name of a namespace in the local registry
Namespace string `json:"namespace"`
}
func (n *RegistryMirrorConfiguration) Equal(o *RegistryMirrorConfiguration) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Endpoint == o.Endpoint && n.Port == o.Port && n.CACertContent == o.CACertContent &&
n.InsecureSkipVerify == o.InsecureSkipVerify && n.Authenticate == o.Authenticate &&
OCINamespacesSliceEqual(n.OCINamespaces, o.OCINamespaces)
}
// OCINamespacesSliceEqual is used to check equality of the OCINamespaces fields of two RegistryMirrorConfiguration.
func OCINamespacesSliceEqual(a, b []OCINamespace) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[generateOCINamespaceKey(v)]++
}
for _, v := range b {
k := generateOCINamespaceKey(v)
if _, ok := m[k]; !ok {
return false
}
m[k]--
if m[k] == 0 {
delete(m, k)
}
}
return len(m) == 0
}
func generateOCINamespaceKey(n OCINamespace) (key string) {
return n.Registry + n.Namespace
}
type ControlPlaneConfiguration struct {
// Count defines the number of desired control plane nodes. Defaults to 1.
Count int `json:"count,omitempty"`
// Endpoint defines the host ip and port to use for the control plane.
Endpoint *Endpoint `json:"endpoint,omitempty"`
// MachineGroupRef defines the machine group configuration for the control plane.
MachineGroupRef *Ref `json:"machineGroupRef,omitempty"`
// Taints define the set of taints to be applied on control plane nodes
Taints []corev1.Taint `json:"taints,omitempty"`
// Labels define the labels to assign to the node
Labels map[string]string `json:"labels,omitempty"`
// UpgradeRolloutStrategy determines the rollout strategy to use for rolling upgrades
// and related parameters/knobs
UpgradeRolloutStrategy *ControlPlaneUpgradeRolloutStrategy `json:"upgradeRolloutStrategy,omitempty"`
// SkipLoadBalancerDeployment skip deploying control plane load balancer.
// Make sure your infrastructure can handle control plane load balancing when you set this field to true.
SkipLoadBalancerDeployment bool `json:"skipLoadBalancerDeployment,omitempty"`
}
func TaintsSliceEqual(s1, s2 []corev1.Taint) bool {
if len(s1) != len(s2) {
return false
}
taints := make(map[corev1.Taint]struct{})
for _, taint := range s1 {
taints[taint] = struct{}{}
}
for _, taint := range s2 {
_, ok := taints[taint]
if !ok {
return false
}
}
return true
}
// MapEqual compares two maps to check whether or not they are equal.
func MapEqual(s1, s2 map[string]string) bool {
if len(s1) != len(s2) {
return false
}
for key, val := range s2 {
v, ok := s1[key]
if !ok {
return false
}
if val != v {
return false
}
}
return true
}
func (n *ControlPlaneConfiguration) Equal(o *ControlPlaneConfiguration) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Count == o.Count && n.MachineGroupRef.Equal(o.MachineGroupRef) &&
TaintsSliceEqual(n.Taints, o.Taints) && MapEqual(n.Labels, o.Labels)
}
type Endpoint struct {
// Host defines the ip that you want to use to connect to the control plane
Host string `json:"host"`
}
// Equal compares if expected endpoint and existing endpoint are equal for non CloudStack clusters.
func (n *Endpoint) Equal(o *Endpoint, kind string) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if kind == CloudStackDatacenterKind {
return n.CloudStackEqual(o)
}
return n.Host == o.Host
}
// CloudStackEqual makes CloudStack cluster upgrade to new release backward compatible by striping CloudStack cluster existing endpoint default port
// and comparing if expected endpoint and existing endpoint are equal.
// Cloudstack CLI used to add default port to cluster object.
// Now cluster object stays the same with customer input and port is defaulted only in CAPI template.
func (n *Endpoint) CloudStackEqual(o *Endpoint) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if n.Host == o.Host {
return true
}
nhost, nport, err := GetControlPlaneHostPort(n.Host, "")
if err != nil {
return false
}
ohost, oport, _ := GetControlPlaneHostPort(o.Host, "")
if oport == ControlEndpointDefaultPort {
switch nport {
case ControlEndpointDefaultPort, "":
return nhost == ohost
default:
return false
}
}
if nport == ControlEndpointDefaultPort && oport == "" {
return nhost == ohost
}
return n.Host == o.Host
}
// GetControlPlaneHostPort retrieves the ControlPlaneConfiguration host and port split defined in the cluster.Spec.
func GetControlPlaneHostPort(pHost string, defaultPort string) (string, string, error) {
host, port, err := net.SplitHostPort(pHost)
if err != nil {
if strings.Contains(err.Error(), "missing port") {
host = pHost
port = defaultPort
err = nil
} else {
return "", "", fmt.Errorf("host %s is invalid: %v", pHost, err.Error())
}
}
return host, port, err
}
type WorkerNodeGroupConfiguration struct {
// Name refers to the name of the worker node group
Name string `json:"name,omitempty"`
// Count defines the number of desired worker nodes. Defaults to 1.
Count *int `json:"count,omitempty"`
// AutoScalingConfiguration defines the auto scaling configuration
AutoScalingConfiguration *AutoScalingConfiguration `json:"autoscalingConfiguration,omitempty"`
// MachineGroupRef defines the machine group configuration for the worker nodes.
MachineGroupRef *Ref `json:"machineGroupRef,omitempty"`
// Taints define the set of taints to be applied on worker nodes
Taints []corev1.Taint `json:"taints,omitempty"`
// Labels define the labels to assign to the node
Labels map[string]string `json:"labels,omitempty"`
// UpgradeRolloutStrategy determines the rollout strategy to use for rolling upgrades
// and related parameters/knobs
UpgradeRolloutStrategy *WorkerNodesUpgradeRolloutStrategy `json:"upgradeRolloutStrategy,omitempty"`
}
func generateWorkerNodeGroupKey(c WorkerNodeGroupConfiguration) (key string) {
key = c.Name
if c.MachineGroupRef != nil {
key += c.MachineGroupRef.Kind + c.MachineGroupRef.Name
}
if c.AutoScalingConfiguration != nil {
key += "autoscaling" + strconv.Itoa(c.AutoScalingConfiguration.MaxCount) + strconv.Itoa(c.AutoScalingConfiguration.MinCount)
}
if c.Count == nil {
return "nil" + key
}
return strconv.Itoa(*c.Count) + key
}
func WorkerNodeGroupConfigurationsSliceEqual(a, b []WorkerNodeGroupConfiguration) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[generateWorkerNodeGroupKey(v)]++
}
for _, v := range b {
k := generateWorkerNodeGroupKey(v)
if _, ok := m[k]; !ok {
return false
}
m[k] -= 1
if m[k] == 0 {
delete(m, k)
}
}
if len(m) != 0 {
return false
}
return WorkerNodeGroupConfigurationSliceTaintsEqual(a, b) && WorkerNodeGroupConfigurationsLabelsMapEqual(a, b)
}
func WorkerNodeGroupConfigurationSliceTaintsEqual(a, b []WorkerNodeGroupConfiguration) bool {
m := make(map[string][]corev1.Taint, len(a))
for _, nodeGroup := range a {
m[nodeGroup.Name] = nodeGroup.Taints
}
for _, nodeGroup := range b {
if _, ok := m[nodeGroup.Name]; !ok {
// this method is not concerned with added/removed node groups,
// only with the comparison of taints on existing node groups
// if a node group is present in a but not b, or vise versa, it's immaterial
continue
} else {
if !TaintsSliceEqual(m[nodeGroup.Name], nodeGroup.Taints) {
return false
}
}
}
return true
}
func WorkerNodeGroupConfigurationsLabelsMapEqual(a, b []WorkerNodeGroupConfiguration) bool {
m := make(map[string]map[string]string, len(a))
for _, nodeGroup := range a {
m[nodeGroup.Name] = nodeGroup.Labels
}
for _, nodeGroup := range b {
if _, ok := m[nodeGroup.Name]; !ok {
// this method is not concerned with added/removed node groups,
// only with the comparison of labels on existing node groups
// if a node group is present in a but not b, or vise versa, it's immaterial
continue
} else {
if !MapEqual(m[nodeGroup.Name], nodeGroup.Labels) {
return false
}
}
}
return true
}
type ClusterNetwork struct {
// Comma-separated list of CIDR blocks to use for pod and service subnets.
// Defaults to 192.168.0.0/16 for pod subnet.
Pods Pods `json:"pods,omitempty"`
Services Services `json:"services,omitempty"`
// Deprecated. Use CNIConfig
CNI CNI `json:"cni,omitempty"`
// CNIConfig specifies the CNI plugin to be installed in the cluster
CNIConfig *CNIConfig `json:"cniConfig,omitempty"`
DNS DNS `json:"dns,omitempty"`
Nodes *Nodes `json:"nodes,omitempty"`
}
func (n *ClusterNetwork) Equal(o *ClusterNetwork) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if !CNIPluginSame(*n, *o) {
return false
}
oldCNIConfig := getCNIConfig(o)
newCNIConfig := getCNIConfig(n)
if !newCNIConfig.Equal(oldCNIConfig) {
return false
}
return n.Pods.Equal(&o.Pods) &&
n.Services.Equal(&o.Services) &&
n.DNS.Equal(&o.DNS) &&
n.Nodes.Equal(o.Nodes)
}
func getCNIConfig(cn *ClusterNetwork) *CNIConfig {
/* Only needed since we're introducing CNIConfig to replace the deprecated CNI field. This way we can compare the individual fields
for the CNI plugin configuration*/
var tempCNIConfig *CNIConfig
if cn.CNIConfig == nil {
// This is for upgrading from release-0.7, to ensure that all oCNIConfig fields, such as policyEnforcementMode have the default values
switch cn.CNI {
case Cilium, CiliumEnterprise:
tempCNIConfig = &CNIConfig{Cilium: &CiliumConfig{}}
case Kindnetd:
tempCNIConfig = &CNIConfig{Kindnetd: &KindnetdConfig{}}
}
} else {
tempCNIConfig = cn.CNIConfig
}
return tempCNIConfig
}
func (n *Pods) Equal(o *Pods) bool {
return SliceEqual(n.CidrBlocks, o.CidrBlocks)
}
func (n *Services) Equal(o *Services) bool {
return SliceEqual(n.CidrBlocks, o.CidrBlocks)
}
func (n *DNS) Equal(o *DNS) bool {
return n.ResolvConf.Equal(o.ResolvConf)
}
func (n *CNIConfig) Equal(o *CNIConfig) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if !n.Cilium.Equal(o.Cilium) {
return false
}
if !n.Kindnetd.Equal(o.Kindnetd) {
return false
}
return true
}
func (n *CiliumConfig) Equal(o *CiliumConfig) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if n.PolicyEnforcementMode != o.PolicyEnforcementMode {
return false
}
if n.EgressMasqueradeInterfaces != o.EgressMasqueradeInterfaces {
return false
}
oSkipUpgradeIsFalse := o.SkipUpgrade == nil || !*o.SkipUpgrade
nSkipUpgradeIsFalse := n.SkipUpgrade == nil || !*n.SkipUpgrade
// We consider nil to be false in equality checks. Here we're checking if o is false then
// n must be false and vice-versa. If neither of these are true, then both o and n must be
// true so we don't need an explicit check.
if oSkipUpgradeIsFalse && !nSkipUpgradeIsFalse || !oSkipUpgradeIsFalse && nSkipUpgradeIsFalse {
return false
}
return true
}
func (n *KindnetdConfig) Equal(o *KindnetdConfig) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return true
}
func UsersSliceEqual(a, b []UserConfiguration) bool {
if len(a) != len(b) {
return false
}
m := make(map[string][]string, len(a))
for _, v := range a {
m[v.Name] = v.SshAuthorizedKeys
}
for _, v := range b {
if _, ok := m[v.Name]; !ok {
return false
}
if !SliceEqual(v.SshAuthorizedKeys, m[v.Name]) {
return false
}
}
return true
}
func CNIPluginSame(n ClusterNetwork, o ClusterNetwork) bool {
if n.CNI != "" {
/*This shouldn't be required since we set CNIConfig and unset CNI as part of cluster_defaults. However, while upgrading an existing cluster, the eks-a controller
does not set any defaults (no mutating webhook), so it gets stuck in an error loop. Adding these checks to avoid that. We can remove it when removing the CNI field
in a later release*/
return o.CNI == n.CNI
}
if n.CNIConfig != nil {
if o.CNI != "" {
switch o.CNI {
case Cilium, CiliumEnterprise:
if n.CNIConfig.Cilium == nil {
return false
}
case Kindnetd:
if n.CNIConfig.Kindnetd == nil {
return false
}
default:
return false
}
return true
}
if o.CNIConfig != nil {
if (n.CNIConfig.Cilium != nil && o.CNIConfig.Cilium == nil) || (n.CNIConfig.Cilium == nil && o.CNIConfig.Cilium != nil) {
return false
}
if (n.CNIConfig.Kindnetd != nil && o.CNIConfig.Kindnetd == nil) || (n.CNIConfig.Kindnetd == nil && o.CNIConfig.Kindnetd != nil) {
return false
}
}
}
return true
}
func SliceEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[v]++
}
for _, v := range b {
if _, ok := m[v]; !ok {
return false
}
m[v] -= 1
if m[v] == 0 {
delete(m, v)
}
}
return len(m) == 0
}
func RefSliceEqual(a, b []Ref) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[v.Name+v.Kind]++
}
for _, v := range b {
if _, ok := m[v.Name+v.Kind]; !ok {
return false
}
m[v.Name+v.Kind] -= 1
if m[v.Name+v.Kind] == 0 {
delete(m, v.Name+v.Kind)
}
}
return len(m) == 0
}
type Pods struct {
CidrBlocks []string `json:"cidrBlocks,omitempty"`
}
type Services struct {
CidrBlocks []string `json:"cidrBlocks,omitempty"`
}
type DNS struct {
// ResolvConf refers to the DNS resolver configuration
ResolvConf *ResolvConf `json:"resolvConf,omitempty"`
}
type ResolvConf struct {
// Path defines the path to the file that contains the DNS resolver configuration
Path string `json:"path,omitempty"`
}
type Nodes struct {
// CIDRMaskSize defines the mask size for node cidr in the cluster, default for ipv4 is 24. This is an optional field
CIDRMaskSize *int `json:"cidrMaskSize,omitempty"`
}
// Equal compares two Nodes definitions and return true if the are equivalent.
func (n *Nodes) Equal(o *Nodes) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
if n.CIDRMaskSize == o.CIDRMaskSize {
return true
}
if n.CIDRMaskSize == nil || o.CIDRMaskSize == nil {
return false
}
return *n.CIDRMaskSize == *o.CIDRMaskSize
}
func (n *ResolvConf) Equal(o *ResolvConf) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Path == o.Path
}
type KubernetesVersion string
const (
Kube118 KubernetesVersion = "1.18"
Kube119 KubernetesVersion = "1.19"
Kube120 KubernetesVersion = "1.20"
Kube121 KubernetesVersion = "1.21"
Kube122 KubernetesVersion = "1.22"
Kube123 KubernetesVersion = "1.23"
Kube124 KubernetesVersion = "1.24"
Kube125 KubernetesVersion = "1.25"
Kube126 KubernetesVersion = "1.26"
Kube127 KubernetesVersion = "1.27"
)
// KubeVersionToSemver converts kube version to semver for comparisons.
func KubeVersionToSemver(kubeVersion KubernetesVersion) (*semver.Version, error) {
// appending the ".0" as the patch version to have a valid semver string and use those semvers for comparison
return semver.New(string(kubeVersion) + ".0")
}
type CNI string
type CiliumPolicyEnforcementMode string
type CNIConfig struct {
Cilium *CiliumConfig `json:"cilium,omitempty"`
Kindnetd *KindnetdConfig `json:"kindnetd,omitempty"`
}
type CiliumConfig struct {
// PolicyEnforcementMode determines communication allowed between pods. Accepted values are default, always, never.
PolicyEnforcementMode CiliumPolicyEnforcementMode `json:"policyEnforcementMode,omitempty"`
// EgressMasquaradeInterfaces determines which network interfaces are used for masquerading. Accepted values are a valid interface name or interface prefix.
// +optional
EgressMasqueradeInterfaces string `json:"egressMasqueradeInterfaces,omitempty"`
// SkipUpgrade indicicates that Cilium maintenance should be skipped during upgrades. This can
// be used when operators wish to self manage the Cilium installation.
// +optional
SkipUpgrade *bool `json:"skipUpgrade,omitempty"`
}
// IsManaged returns true if SkipUpgrade is nil or false indicating EKS-A is responsible for
// the Cilium installation.
func (n *CiliumConfig) IsManaged() bool {
return n.SkipUpgrade == nil || !*n.SkipUpgrade
}
type KindnetdConfig struct{}
const (
Cilium CNI = "cilium"
CiliumEnterprise CNI = "cilium-enterprise"
Kindnetd CNI = "kindnetd"
)
var validCNIs = map[CNI]struct{}{
Cilium: {},
Kindnetd: {},
}
const (
CiliumPolicyModeDefault CiliumPolicyEnforcementMode = "default"
CiliumPolicyModeAlways CiliumPolicyEnforcementMode = "always"
CiliumPolicyModeNever CiliumPolicyEnforcementMode = "never"
)
var validCiliumPolicyEnforcementModes = map[CiliumPolicyEnforcementMode]bool{
CiliumPolicyModeAlways: true,
CiliumPolicyModeDefault: true,
CiliumPolicyModeNever: true,
}
// ClusterStatus defines the observed state of Cluster.
type ClusterStatus struct {
// Descriptive message about a fatal problem while reconciling a cluster
// +optional
FailureMessage *string `json:"failureMessage,omitempty"`
// EksdReleaseRef defines the properties of the EKS-D object on the cluster
EksdReleaseRef *EksdReleaseRef `json:"eksdReleaseRef,omitempty"`
// +optional
Conditions []Condition `json:"conditions,omitempty"`
// ReconciledGeneration represents the .metadata.generation the last time the
// cluster was successfully reconciled. It is the latest generation observed
// by the controller.
// NOTE: This field was added for internal use and we do not provide guarantees
// to its behavior if changed externally. Its meaning and implementation are
// subject to change in the future.
ReconciledGeneration int64 `json:"reconciledGeneration,omitempty"`
// ChildrenReconciledGeneration represents the sum of the .metadata.generation
// for all the linked objects for the cluster, observed the last time the
// cluster was successfully reconciled.
// NOTE: This field was added for internal use and we do not provide guarantees
// to its behavior if changed externally. Its meaning and implementation are
// subject to change in the future.
ChildrenReconciledGeneration int64 `json:"childrenReconciledGeneration,omitempty"`
// ObservedGeneration is the latest generation observed by the controller.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
type EksdReleaseRef struct {
// ApiVersion refers to the EKS-D API version
ApiVersion string `json:"apiVersion"`
// Kind refers to the Release kind for the EKS-D object
Kind string `json:"kind"`
// Name refers to the name of the EKS-D object on the cluster
Name string `json:"name"`
// Namespace refers to the namespace for the EKS-D release resources
Namespace string `json:"namespace"`
}
type BundlesRef struct {
// APIVersion refers to the Bundles APIVersion
APIVersion string `json:"apiVersion"`
// Name refers to the name of the Bundles object in the cluster
Name string `json:"name"`
// Namespace refers to the Bundles's namespace
Namespace string `json:"namespace"`
}
func (b *BundlesRef) Equal(o *BundlesRef) bool {
if b == nil || o == nil {
return b == o
}
return b.APIVersion == o.APIVersion && b.Name == o.Name && b.Namespace == o.Namespace
}
type Ref struct {
Kind string `json:"kind,omitempty"`
Name string `json:"name,omitempty"`
}
func (n *Ref) Equal(o *Ref) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Kind == o.Kind && n.Name == o.Name
}
// +kubebuilder:object:generate=false
// Interface for getting DatacenterRef fields for Cluster type.
type ProviderRefAccessor interface {
Kind() string
Name() string
}
// +kubebuilder:object:generate=false
// Interface for getting Kind field for Cluster type.
type KindAccessor interface {
Kind() string
ExpectedKind() string
}
// PackageConfiguration for installing EKS Anywhere curated packages.
type PackageConfiguration struct {
// Disable package controller on cluster
Disable bool `json:"disable,omitempty"`
// Controller package controller configuration
Controller *PackageControllerConfiguration `json:"controller,omitempty"`
// Cronjob for ecr token refresher
CronJob *PackageControllerCronJob `json:"cronjob,omitempty"`
}
// Equal for PackageConfiguration.
func (n *PackageConfiguration) Equal(o *PackageConfiguration) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Disable == o.Disable && n.Controller.Equal(o.Controller) && n.CronJob.Equal(o.CronJob)
}
// PackageControllerConfiguration configure aspects of package controller.
type PackageControllerConfiguration struct {
// Repository package controller repository
Repository string `json:"repository,omitempty"`
// Tag package controller tag
Tag string `json:"tag,omitempty"`
// Digest package controller digest
Digest string `json:"digest,omitempty"`
// DisableWebhooks on package controller
DisableWebhooks bool `json:"disableWebhooks,omitempty"`
// Env of package controller in the format `key=value`
Env []string `json:"env,omitempty"`
// Resources of package controller
Resources PackageControllerResources `json:"resources,omitempty"`
}
// Equal for PackageControllerConfiguration.
func (n *PackageControllerConfiguration) Equal(o *PackageControllerConfiguration) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Repository == o.Repository && n.Tag == o.Tag && n.Digest == o.Digest &&
n.DisableWebhooks == o.DisableWebhooks && SliceEqual(n.Env, o.Env) && n.Resources.Equal(&o.Resources)
}
// PackageControllerResources resource aspects of package controller.
type PackageControllerResources struct {
// Requests for image resources
Requests ImageResource `json:"requests,omitempty"`
Limits ImageResource `json:"limits,omitempty"`
}
// Equal for PackageControllerResources.
func (n *PackageControllerResources) Equal(o *PackageControllerResources) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Requests.Equal(&o.Requests) && n.Limits.Equal(&o.Limits)
}
// ImageResource resources for container image.
type ImageResource struct {
// CPU image cpu
CPU string `json:"cpu,omitempty"`
// Memory image memory
Memory string `json:"memory,omitempty"`
}
// Equal for ImageResource.
func (n *ImageResource) Equal(o *ImageResource) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.CPU == o.CPU && n.Memory == o.Memory
}
// PackageControllerCronJob configure aspects of package controller.
type PackageControllerCronJob struct {
// Repository ecr token refresher repository
Repository string `json:"repository,omitempty"`
// Tag ecr token refresher tag
Tag string `json:"tag,omitempty"`
// Digest ecr token refresher digest
Digest string `json:"digest,omitempty"`
// Disable on cron job
Disable bool `json:"disable,omitempty"`
}
// Equal for PackageControllerCronJob.
func (n *PackageControllerCronJob) Equal(o *PackageControllerCronJob) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Repository == o.Repository && n.Tag == o.Tag && n.Digest == o.Digest && n.Disable == o.Disable
}
// ExternalEtcdConfiguration defines the configuration options for using unstacked etcd topology.
type ExternalEtcdConfiguration struct {
Count int `json:"count,omitempty"`
// MachineGroupRef defines the machine group configuration for the etcd machines.
MachineGroupRef *Ref `json:"machineGroupRef,omitempty"`
}
func (n *ExternalEtcdConfiguration) Equal(o *ExternalEtcdConfiguration) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.Count == o.Count && n.MachineGroupRef.Equal(o.MachineGroupRef)
}
type ManagementCluster struct {
Name string `json:"name,omitempty"`
}
func (n *ManagementCluster) Equal(o ManagementCluster) bool {
return n.Name == o.Name
}
type PodIAMConfig struct {
ServiceAccountIssuer string `json:"serviceAccountIssuer"`
}
func (n *PodIAMConfig) Equal(o *PodIAMConfig) bool {
if n == o {
return true
}
if n == nil || o == nil {
return false
}
return n.ServiceAccountIssuer == o.ServiceAccountIssuer
}
// AutoScalingConfiguration defines the configuration for the node autoscaling feature.
type AutoScalingConfiguration struct {
// MinCount defines the minimum number of nodes for the associated resource group.
// +optional
MinCount int `json:"minCount,omitempty"`
// MaxCount defines the maximum number of nodes for the associated resource group.
// +optional
MaxCount int `json:"maxCount,omitempty"`
}
// ControlPlaneUpgradeRolloutStrategy indicates rollout strategy for cluster.
type ControlPlaneUpgradeRolloutStrategy struct {
Type string `json:"type,omitempty"`
RollingUpdate ControlPlaneRollingUpdateParams `json:"rollingUpdate,omitempty"`
}
// ControlPlaneRollingUpdateParams is API for rolling update strategy knobs.
type ControlPlaneRollingUpdateParams struct {
MaxSurge int `json:"maxSurge"`
}
// WorkerNodesUpgradeRolloutStrategy indicates rollout strategy for cluster.
type WorkerNodesUpgradeRolloutStrategy struct {
Type string `json:"type,omitempty"`
RollingUpdate WorkerNodesRollingUpdateParams `json:"rollingUpdate,omitempty"`
}
// WorkerNodesRollingUpdateParams is API for rolling update strategy knobs.
type WorkerNodesRollingUpdateParams struct {
MaxSurge int `json:"maxSurge"`
MaxUnavailable int `json:"maxUnavailable"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Cluster is the Schema for the clusters API.
type Cluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterSpec `json:"spec,omitempty"`
Status ClusterStatus `json:"status,omitempty"`
}
func (c *Cluster) GetConditions() clusterv1.Conditions {
return c.Status.Conditions
}
func (c *Cluster) SetConditions(conditions clusterv1.Conditions) {
c.Status.Conditions = conditions
}
// +kubebuilder:object:generate=false
// Same as Cluster except stripped down for generation of yaml file during generate clusterconfig.
type ClusterGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec ClusterSpec `json:"spec,omitempty"`
}
func (c *Cluster) Kind() string {
return c.TypeMeta.Kind
}
func (c *Cluster) ExpectedKind() string {
return ClusterKind
}
func (c *Cluster) PausedAnnotation() string {
return pausedAnnotation
}
func (c *Cluster) ControlPlaneAnnotation() string {
return controlPlaneAnnotation
}
func (c *Cluster) ResourceType() string {
return clusterResourceType
}
func (c *Cluster) EtcdAnnotation() string {
return etcdAnnotation
}
func (c *Cluster) IsSelfManaged() bool {
return c.Spec.ManagementCluster.Name == "" || c.Spec.ManagementCluster.Name == c.Name
}
func (c *Cluster) SetManagedBy(managementClusterName string) {
if c.Annotations == nil {
c.Annotations = map[string]string{}
}
c.Annotations[managementAnnotation] = managementClusterName
c.Spec.ManagementCluster.Name = managementClusterName
}
func (c *Cluster) SetSelfManaged() {
c.Spec.ManagementCluster.Name = c.Name
}
func (c *ClusterGenerate) SetSelfManaged() {
c.Spec.ManagementCluster.Name = c.Name
}
func (c *Cluster) ManagementClusterEqual(s2 *Cluster) bool {
return c.IsSelfManaged() && s2.IsSelfManaged() || c.Spec.ManagementCluster.Equal(s2.Spec.ManagementCluster)
}
// IsSingleNode checks if the cluster has only a single node specified between the controlplane and worker nodes.
func (c *Cluster) IsSingleNode() bool {
return c.Spec.ControlPlaneConfiguration.Count == 1 &&
len(c.Spec.WorkerNodeGroupConfigurations) <= 0
}
func (c *Cluster) MachineConfigRefs() []Ref {
machineConfigRefMap := make(refSet, 1)
machineConfigRefMap.addIfNotNil(c.Spec.ControlPlaneConfiguration.MachineGroupRef)
for _, m := range c.Spec.WorkerNodeGroupConfigurations {
machineConfigRefMap.addIfNotNil(m.MachineGroupRef)
}
if c.Spec.ExternalEtcdConfiguration != nil {
machineConfigRefMap.addIfNotNil(c.Spec.ExternalEtcdConfiguration.MachineGroupRef)
}
return machineConfigRefMap.toSlice()
}
type refSet map[Ref]struct{}
func (r refSet) addIfNotNil(ref *Ref) bool {
if ref != nil {
return r.add(*ref)
}
return false
}
func (r refSet) add(ref Ref) bool {
if _, present := r[ref]; !present {
r[ref] = struct{}{}
return true
} else {
return false
}
}
func (r refSet) toSlice() []Ref {
refs := make([]Ref, 0, len(r))
for ref := range r {
refs = append(refs, ref)
}
return refs
}
func (c *Cluster) ConvertConfigToConfigGenerateStruct() *ClusterGenerate {
namespace := defaultEksaNamespace
if c.Namespace != "" {
namespace = c.Namespace
}
config := &ClusterGenerate{
TypeMeta: c.TypeMeta,
ObjectMeta: ObjectMeta{
Name: c.Name,
Annotations: c.Annotations,
Namespace: namespace,
},
Spec: c.Spec,
}
return config
}
// IsManaged returns true if the Cluster is not self managed.
func (c *Cluster) IsManaged() bool {
return !c.IsSelfManaged()
}
// ManagedBy returns the Cluster's management cluster's name.
func (c *Cluster) ManagedBy() string {
return c.Spec.ManagementCluster.Name
}
// +kubebuilder:object:root=true
// ClusterList contains a list of Cluster.
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Cluster `json:"items"`
}
func init() {
SchemeBuilder.Register(&Cluster{}, &ClusterList{})
}
| 1,281 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"reflect"
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestClusterMachineConfigRefs(t *testing.T) {
cluster := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube119,
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{
Host: "test-ip",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(3),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test-1",
},
},
{
Count: ptr.Int(3),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
},
{
Count: ptr.Int(5),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test", // This tests duplicates
},
},
},
ExternalEtcdConfiguration: &v1alpha1.ExternalEtcdConfiguration{
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test-etcd",
},
},
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "eksa-unit-test",
},
},
}
want := []v1alpha1.Ref{
{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test-1",
},
{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test-etcd",
},
}
got := cluster.MachineConfigRefs()
if !v1alpha1.RefSliceEqual(got, want) {
t.Fatalf("Expected %v, got %v", want, got)
}
}
func TestClusterIsSelfManaged(t *testing.T) {
testCases := []struct {
testName string
cluster *v1alpha1.Cluster
want bool
}{
{
testName: "empty name",
cluster: &v1alpha1.Cluster{},
want: true,
},
{
testName: "name same as self",
cluster: &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-1",
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: "cluster-1",
},
},
},
want: true,
},
{
testName: "name different tha self",
cluster: &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-2",
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: "cluster-1",
},
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster.IsSelfManaged()).To(Equal(tt.want))
})
}
}
func TestClusterSetManagedBy(t *testing.T) {
c := &v1alpha1.Cluster{}
managementClusterName := "managament-cluster"
c.SetManagedBy(managementClusterName)
g := NewWithT(t)
g.Expect(c.IsSelfManaged()).To(BeFalse())
g.Expect(c.ManagedBy()).To(Equal(managementClusterName))
}
func TestClusterSetSelfManaged(t *testing.T) {
c := &v1alpha1.Cluster{}
c.SetSelfManaged()
g := NewWithT(t)
g.Expect(c.IsSelfManaged()).To(BeTrue())
}
func TestClusterManagementClusterEqual(t *testing.T) {
testCases := []struct {
testName string
cluster1SelfManaged, cluster2SelfManaged bool
want bool
}{
{
testName: "both self managed",
cluster1SelfManaged: true,
cluster2SelfManaged: true,
want: true,
},
{
testName: "both managed",
cluster1SelfManaged: false,
cluster2SelfManaged: false,
want: true,
},
{
testName: "one managed, one self managed",
cluster1SelfManaged: false,
cluster2SelfManaged: true,
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{}
setSelfManaged(cluster1, tt.cluster1SelfManaged)
cluster2 := &v1alpha1.Cluster{}
setSelfManaged(cluster2, tt.cluster2SelfManaged)
g := NewWithT(t)
g.Expect(cluster1.ManagementClusterEqual(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterResolvConfEqual(t *testing.T) {
testCases := []struct {
testName string
cluster1ResolvConf, cluster2ResolvConf string
want bool
}{
{
testName: "both empty",
cluster1ResolvConf: "",
cluster2ResolvConf: "",
want: true,
},
{
testName: "both defined",
cluster1ResolvConf: "my-file.conf",
cluster2ResolvConf: "my-file.conf",
want: true,
},
{
testName: "one empty, one defined",
cluster1ResolvConf: "",
cluster2ResolvConf: "my-file.conf",
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ClusterNetwork: v1alpha1.ClusterNetwork{
DNS: v1alpha1.DNS{
ResolvConf: &v1alpha1.ResolvConf{
Path: tt.cluster1ResolvConf,
},
},
},
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ClusterNetwork: v1alpha1.ClusterNetwork{
DNS: v1alpha1.DNS{
ResolvConf: &v1alpha1.ResolvConf{
Path: tt.cluster2ResolvConf,
},
},
},
},
}
g := NewWithT(t)
g.Expect(cluster1.Spec.ClusterNetwork.DNS.ResolvConf.Equal(cluster2.Spec.ClusterNetwork.DNS.ResolvConf)).To(Equal(tt.want))
})
}
}
func TestClusterEqualKubernetesVersion(t *testing.T) {
testCases := []struct {
testName string
cluster1Version, cluster2Version v1alpha1.KubernetesVersion
want bool
}{
{
testName: "both empty",
cluster1Version: "",
cluster2Version: "",
want: true,
},
{
testName: "one empty, one exists",
cluster1Version: "",
cluster2Version: v1alpha1.Kube118,
want: false,
},
{
testName: "both exists, diff",
cluster1Version: v1alpha1.Kube118,
cluster2Version: v1alpha1.Kube119,
want: false,
},
{
testName: "both exists, same",
cluster1Version: v1alpha1.Kube118,
cluster2Version: v1alpha1.Kube118,
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: tt.cluster1Version,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: tt.cluster2Version,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualWorkerNodeGroupConfigurations(t *testing.T) {
var emptyTaints []corev1.Taint
taint1 := corev1.Taint{Key: "key1"}
taint2 := corev1.Taint{Key: "key2"}
taints1 := []corev1.Taint{taint1, taint2}
taints1DiffOrder := []corev1.Taint{taint2, taint1}
taints2 := []corev1.Taint{taint1}
testCases := []struct {
testName string
cluster1Wngs, cluster2Wngs []v1alpha1.WorkerNodeGroupConfiguration
want bool
}{
{
testName: "both empty",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{},
want: true,
},
{
testName: "one empty, one exists",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
},
want: false,
},
{
testName: "both exist, same",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
},
want: true,
},
{
testName: "both exist, order diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: "k1",
Name: "n1",
},
},
{
Count: ptr.Int(2),
MachineGroupRef: &v1alpha1.Ref{
Kind: "k2",
Name: "n2",
},
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(2),
MachineGroupRef: &v1alpha1.Ref{
Kind: "k2",
Name: "n2",
},
},
{
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: "k1",
Name: "n1",
},
},
},
want: true,
},
{
testName: "both exist, count diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(2),
},
},
want: false,
},
{
testName: "both exist, autoscaling config diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
AutoScalingConfiguration: &v1alpha1.AutoScalingConfiguration{
MinCount: 1,
MaxCount: 3,
},
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
AutoScalingConfiguration: nil,
},
},
want: false,
},
{
testName: "both exist, autoscaling config min diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
AutoScalingConfiguration: &v1alpha1.AutoScalingConfiguration{
MinCount: 1,
MaxCount: 3,
},
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
AutoScalingConfiguration: &v1alpha1.AutoScalingConfiguration{
MinCount: 2,
MaxCount: 3,
},
},
},
want: false,
},
{
testName: "both exist, autoscaling config max diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
AutoScalingConfiguration: &v1alpha1.AutoScalingConfiguration{
MinCount: 1,
MaxCount: 2,
},
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
AutoScalingConfiguration: &v1alpha1.AutoScalingConfiguration{
MinCount: 1,
MaxCount: 3,
},
},
},
want: false,
},
{
testName: "both exist, ref diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &v1alpha1.Ref{
Kind: "k1",
Name: "n1",
},
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &v1alpha1.Ref{
Kind: "k2",
Name: "n2",
},
},
},
want: false,
},
{
testName: "both exist, same taints",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1,
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1,
},
},
want: true,
},
{
testName: "both exist, same taints in different order",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1,
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1DiffOrder,
},
},
want: true,
},
{
testName: "both exist, taints diff",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1,
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints2,
},
},
want: false,
},
{
testName: "both exist, one with no taints",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1,
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{},
},
want: false,
},
{
testName: "both exist, one with empty taints",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: taints1,
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: emptyTaints,
},
},
want: false,
},
{
testName: "both exist, both with empty taints",
cluster1Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: emptyTaints,
},
},
cluster2Wngs: []v1alpha1.WorkerNodeGroupConfiguration{
{
Taints: emptyTaints,
},
},
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
WorkerNodeGroupConfigurations: tt.cluster1Wngs,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
WorkerNodeGroupConfigurations: tt.cluster2Wngs,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualDatacenterRef(t *testing.T) {
testCases := []struct {
testName string
cluster1DatacenterRef, cluster2DatacenterRef v1alpha1.Ref
want bool
}{
{
testName: "both empty",
want: true,
},
{
testName: "one empty, one exists",
cluster1DatacenterRef: v1alpha1.Ref{
Kind: "k",
Name: "n",
},
want: false,
},
{
testName: "both exist, diff",
cluster1DatacenterRef: v1alpha1.Ref{
Kind: "k1",
Name: "n1",
},
cluster2DatacenterRef: v1alpha1.Ref{
Kind: "k2",
Name: "n2",
},
want: false,
},
{
testName: "both exist, same",
cluster1DatacenterRef: v1alpha1.Ref{
Kind: "k",
Name: "n",
},
cluster2DatacenterRef: v1alpha1.Ref{
Kind: "k",
Name: "n",
},
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
DatacenterRef: tt.cluster1DatacenterRef,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
DatacenterRef: tt.cluster2DatacenterRef,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualIdentityProviderRefs(t *testing.T) {
testCases := []struct {
testName string
cluster1Ipr, cluster2Ipr []v1alpha1.Ref
want bool
}{
{
testName: "both empty",
cluster1Ipr: []v1alpha1.Ref{},
cluster2Ipr: []v1alpha1.Ref{},
want: true,
},
{
testName: "one empty, one exists",
cluster1Ipr: []v1alpha1.Ref{
{
Kind: "k",
Name: "n",
},
},
want: false,
},
{
testName: "both exist, same",
cluster1Ipr: []v1alpha1.Ref{
{
Kind: "k",
Name: "n",
},
},
cluster2Ipr: []v1alpha1.Ref{
{
Kind: "k",
Name: "n",
},
},
want: true,
},
{
testName: "both exist, order diff",
cluster1Ipr: []v1alpha1.Ref{
{
Kind: "k1",
Name: "n1",
},
{
Kind: "k2",
Name: "n2",
},
},
cluster2Ipr: []v1alpha1.Ref{
{
Kind: "k2",
Name: "n2",
},
{
Kind: "k1",
Name: "n1",
},
},
want: true,
},
{
testName: "both exist, count diff",
cluster1Ipr: []v1alpha1.Ref{
{
Kind: "k1",
Name: "n1",
},
},
cluster2Ipr: []v1alpha1.Ref{
{
Kind: "k2",
Name: "n2",
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
IdentityProviderRefs: tt.cluster1Ipr,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
IdentityProviderRefs: tt.cluster2Ipr,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualGitOpsRef(t *testing.T) {
testCases := []struct {
testName string
cluster1GitOpsRef, cluster2GitOpsRef *v1alpha1.Ref
want bool
}{
{
testName: "both nil",
cluster1GitOpsRef: nil,
cluster2GitOpsRef: nil,
want: true,
},
{
testName: "one nil, one exists",
cluster1GitOpsRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
cluster2GitOpsRef: nil,
want: false,
},
{
testName: "both exist, diff",
cluster1GitOpsRef: &v1alpha1.Ref{
Kind: "k1",
Name: "n1",
},
cluster2GitOpsRef: &v1alpha1.Ref{
Kind: "k2",
Name: "n2",
},
want: false,
},
{
testName: "both exist, same",
cluster1GitOpsRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
cluster2GitOpsRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
GitOpsRef: tt.cluster1GitOpsRef,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
GitOpsRef: tt.cluster2GitOpsRef,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualClusterNetwork(t *testing.T) {
testCases := []struct {
testName string
cluster1ClusterNetwork, cluster2ClusterNetwork v1alpha1.ClusterNetwork
want bool
}{
{
testName: "both nil",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{},
want: true,
},
{
testName: "one empty, one exists",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
Pods: v1alpha1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
},
want: false,
},
{
testName: "both exist, diff",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
Pods: v1alpha1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{
Pods: v1alpha1.Pods{
CidrBlocks: []string{
"1.2.3.4/6",
},
},
},
want: false,
},
{
testName: "both exist, same",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
Pods: v1alpha1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{
Pods: v1alpha1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
},
want: true,
},
{
testName: "same cni plugin (cilium), diff format",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}},
},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{
CNI: v1alpha1.Cilium,
},
want: true,
},
{
testName: "different cni plugin (cilium), diff format",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Kindnetd: &v1alpha1.KindnetdConfig{}},
},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}},
},
want: false,
},
{
testName: "same cni plugin (cilium), diff cilium configuration",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{PolicyEnforcementMode: "always"}},
},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{PolicyEnforcementMode: "default"}},
},
want: false,
},
{
testName: "diff Nodes",
cluster1ClusterNetwork: v1alpha1.ClusterNetwork{
Nodes: &v1alpha1.Nodes{},
},
cluster2ClusterNetwork: v1alpha1.ClusterNetwork{
Nodes: &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(3),
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ClusterNetwork: tt.cluster1ClusterNetwork,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ClusterNetwork: tt.cluster2ClusterNetwork,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualExternalEtcdConfiguration(t *testing.T) {
testCases := []struct {
testName string
cluster1Etcd, cluster2Etcd *v1alpha1.ExternalEtcdConfiguration
want bool
}{
{
testName: "both nil",
cluster1Etcd: nil,
cluster2Etcd: nil,
want: true,
},
{
testName: "one nil, one exists",
cluster1Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
cluster2Etcd: nil,
want: false,
},
{
testName: "both exist, same",
cluster1Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
cluster2Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
want: true,
},
{
testName: "both exist, count diff",
cluster1Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
cluster2Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 2,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
want: false,
},
{
testName: "both exist, ref diff",
cluster1Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k1",
Name: "n1",
},
},
cluster2Etcd: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: "k2",
Name: "n2",
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ExternalEtcdConfiguration: tt.cluster1Etcd,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ExternalEtcdConfiguration: tt.cluster2Etcd,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualProxyConfiguration(t *testing.T) {
testCases := []struct {
testName string
cluster1Proxy, cluster2Proxy *v1alpha1.ProxyConfiguration
want bool
}{
{
testName: "both nil",
cluster1Proxy: nil,
cluster2Proxy: nil,
want: true,
},
{
testName: "one nil, one exists",
cluster1Proxy: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4",
},
cluster2Proxy: nil,
want: false,
},
{
testName: "both exist, same",
cluster1Proxy: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4",
},
cluster2Proxy: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4",
},
want: true,
},
{
testName: "both exist, diff",
cluster1Proxy: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4",
},
cluster2Proxy: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.5",
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ProxyConfiguration: tt.cluster1Proxy,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ProxyConfiguration: tt.cluster2Proxy,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualRegistryMirrorConfiguration(t *testing.T) {
testCases := []struct {
testName string
cluster1Regi, cluster2Regi *v1alpha1.RegistryMirrorConfiguration
want bool
}{
{
testName: "both nil",
cluster1Regi: nil,
cluster2Regi: nil,
want: true,
},
{
testName: "one nil, one exists",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
},
cluster2Regi: nil,
want: false,
},
{
testName: "both exist, same",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
want: true,
},
{
testName: "both exist, endpoint diff",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.5",
CACertContent: "ca",
},
want: false,
},
{
testName: "both exist, namespaces diff (one nil, one exists)",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
want: false,
},
{
testName: "both exist, namespaces diff (registry)",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "1.2.3.4",
Namespace: "eks-anywhere",
},
},
},
want: false,
},
{
testName: "both exist, namespaces diff (namespace)",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "",
},
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: tt.cluster1Regi,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: tt.cluster2Regi,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualPackageConfigurationNetwork(t *testing.T) {
testCases := []struct {
testName string
disable1, disable2 bool
want bool
}{
{
testName: "equal",
disable1: true,
disable2: true,
want: true,
},
{
testName: "not equal",
disable1: true,
disable2: false,
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: tt.disable1,
},
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: tt.disable2,
},
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualManagement(t *testing.T) {
testCases := []struct {
testName string
cluster1Management, cluster2Management string
want bool
}{
{
testName: "both empty",
cluster1Management: "",
cluster2Management: "",
want: true,
},
{
testName: "one empty, one equal to self",
cluster1Management: "",
cluster2Management: "cluster-1",
want: true,
},
{
testName: "both equal to self",
cluster1Management: "cluster-1",
cluster2Management: "cluster-1",
want: true,
},
{
testName: "one empty, one not equal to self",
cluster1Management: "",
cluster2Management: "cluster-2",
want: false,
},
{
testName: "one equal to self, one not equal to self",
cluster1Management: "cluster-1",
cluster2Management: "cluster-2",
want: false,
},
{
testName: "both not equal to self and different",
cluster1Management: "cluster-2",
cluster2Management: "cluster-3",
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-1",
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.cluster1Management,
},
},
}
cluster2 := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-1",
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.cluster2Management,
},
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualEksaVersion(t *testing.T) {
ver := v1alpha1.EksaVersion("v1.0.0")
testCases := []struct {
testName string
version1, version2 *v1alpha1.EksaVersion
want bool
}{
{
testName: "both nil",
version1: nil,
version2: nil,
want: true,
},
{
testName: "one nil, one exists",
version1: &test.DevEksaVersion,
version2: nil,
want: false,
},
{
testName: "both exist, same",
version1: &test.DevEksaVersion,
version2: &test.DevEksaVersion,
want: true,
},
{
testName: "both exist, diff",
version1: &test.DevEksaVersion,
version2: &ver,
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
EksaVersion: tt.version1,
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
EksaVersion: tt.version2,
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestClusterEqualDifferentBundlesRef(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-1",
},
Spec: v1alpha1.ClusterSpec{
BundlesRef: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
},
}
cluster2 := cluster1.DeepCopy()
cluster2.Spec.BundlesRef.Name = "bundles-2"
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(BeFalse())
}
func TestControlPlaneConfigurationEqual(t *testing.T) {
var emptyTaints []corev1.Taint
taint1 := corev1.Taint{Key: "key1"}
taint2 := corev1.Taint{Key: "key2"}
taints1 := []corev1.Taint{taint1, taint2}
taints1DiffOrder := []corev1.Taint{taint2, taint1}
taints2 := []corev1.Taint{taint1}
testCases := []struct {
testName string
cluster1CPConfig, cluster2CPConfig *v1alpha1.ControlPlaneConfiguration
want bool
}{
{
testName: "both exist, same",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &v1alpha1.Endpoint{
Host: "1.2.3.4",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &v1alpha1.Endpoint{
Host: "1.2.3.4",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
want: true,
},
{
testName: "one nil, one exists",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &v1alpha1.Endpoint{
Host: "1.2.3.4",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
cluster2CPConfig: nil,
want: false,
},
{
testName: "one nil, one exists",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &v1alpha1.Endpoint{
Host: "1.2.3.4",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: "k",
Name: "n",
},
},
cluster2CPConfig: nil,
want: false,
},
{
testName: "count exists, diff",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 2,
},
want: false,
},
{
testName: "one count empty",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Count: 1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{},
want: false,
},
{
testName: "both taints equal",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1,
},
want: true,
},
{
testName: "taints in different orders",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1DiffOrder,
},
want: true,
},
{
testName: "different taints",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints2,
},
want: false,
},
{
testName: "One taints set empty",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: emptyTaints,
},
want: false,
},
{
testName: "one taints set not present",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: taints1,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{},
want: false,
},
{
testName: "both taints set empty",
cluster1CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: emptyTaints,
},
cluster2CPConfig: &v1alpha1.ControlPlaneConfiguration{
Taints: emptyTaints,
},
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster1CPConfig.Equal(tt.cluster2CPConfig)).To(Equal(tt.want))
})
}
}
func TestControlPlaneConfigurationEndpointEqual(t *testing.T) {
testCases := []struct {
testName, cluster1CPHost, cluster2CPHost, clusterDatacenterKind string
want bool
}{
{
testName: "one default port, one no port",
cluster1CPHost: "1.2.3.4",
clusterDatacenterKind: v1alpha1.VSphereDatacenterKind,
cluster2CPHost: "1.2.3.5",
want: false,
},
{
testName: "one default port, one no port",
cluster1CPHost: "1.2.3.4",
clusterDatacenterKind: v1alpha1.VSphereDatacenterKind,
cluster2CPHost: "",
want: false,
},
{
testName: "one default port, one no port",
cluster1CPHost: "",
clusterDatacenterKind: v1alpha1.VSphereDatacenterKind,
cluster2CPHost: "",
want: true,
},
{
testName: "one default port, one no port",
cluster1CPHost: "1.1.1.1:6443",
clusterDatacenterKind: v1alpha1.CloudStackDatacenterKind,
cluster2CPHost: "1.1.1.1",
want: true,
},
{
testName: "one default port, one no port",
cluster1CPHost: "1.1.1.1",
clusterDatacenterKind: v1alpha1.CloudStackDatacenterKind,
cluster2CPHost: "1.1.1.1:6443",
want: true,
},
{
testName: "one default port, one no port",
cluster1CPHost: "1.1.1.1",
clusterDatacenterKind: v1alpha1.CloudStackDatacenterKind,
cluster2CPHost: "1.1.1.2",
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
cluster1 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: tt.clusterDatacenterKind,
},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{
Host: tt.cluster1CPHost,
},
},
},
}
cluster2 := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: tt.clusterDatacenterKind,
},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{
Host: tt.cluster2CPHost,
},
},
},
}
g := NewWithT(t)
g.Expect(cluster1.Equal(cluster2)).To(Equal(tt.want))
})
}
}
func TestRegistryMirrorConfigurationEqual(t *testing.T) {
testCases := []struct {
testName string
cluster1Regi, cluster2Regi *v1alpha1.RegistryMirrorConfiguration
want bool
}{
{
testName: "both nil",
cluster1Regi: nil,
cluster2Regi: nil,
want: true,
},
{
testName: "one nil, one exists",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
},
cluster2Regi: nil,
want: false,
},
{
testName: "both exist, same",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "ca",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
want: true,
},
{
testName: "both exist, endpoint diff",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
CACertContent: "",
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.5",
},
want: false,
},
{
testName: "both exist, ca diff",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
CACertContent: "ca1",
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
CACertContent: "ca2",
},
want: false,
},
{
testName: "both exist, namespaces diff (one nil, one exists)",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
want: false,
},
{
testName: "both exist, namespaces diff (registry)",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "",
Namespace: "eks-anywhere",
},
},
},
want: false,
},
{
testName: "both exist, namespaces diff (namespace)",
cluster1Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
},
},
cluster2Regi: &v1alpha1.RegistryMirrorConfiguration{
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "",
},
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster1Regi.Equal(tt.cluster2Regi)).To(Equal(tt.want))
})
}
}
func TestPodIAMServiceAccountIssuerHasNotChanged(t *testing.T) {
testCases := []struct {
testName string
cluster1PodIAMConfig, cluster2PodIAMConfig *v1alpha1.PodIAMConfig
want bool
}{
{
testName: "both nil",
cluster1PodIAMConfig: nil,
cluster2PodIAMConfig: nil,
want: true,
},
{
testName: "one nil, one exists",
cluster1PodIAMConfig: &v1alpha1.PodIAMConfig{
ServiceAccountIssuer: "https://test",
},
cluster2PodIAMConfig: nil,
want: false,
},
{
testName: "both exist, same",
cluster1PodIAMConfig: &v1alpha1.PodIAMConfig{
ServiceAccountIssuer: "https://test",
},
cluster2PodIAMConfig: &v1alpha1.PodIAMConfig{
ServiceAccountIssuer: "https://test",
},
want: true,
},
{
testName: "both exist, service account issuer different",
cluster1PodIAMConfig: &v1alpha1.PodIAMConfig{
ServiceAccountIssuer: "https://test1",
},
cluster2PodIAMConfig: &v1alpha1.PodIAMConfig{
ServiceAccountIssuer: "https://test2",
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster1PodIAMConfig.Equal(tt.cluster2PodIAMConfig)).To(Equal(tt.want))
})
}
}
func TestBundlesRefEqual(t *testing.T) {
testCases := []struct {
testName string
bundlesRef1, bundlesRef2 *v1alpha1.BundlesRef
want bool
}{
{
testName: "both nil",
bundlesRef1: nil,
bundlesRef2: nil,
want: true,
},
{
testName: "1 nil, 2 not nil",
bundlesRef1: nil,
bundlesRef2: &v1alpha1.BundlesRef{},
want: false,
},
{
testName: "1 not nil, 2 nil",
bundlesRef1: &v1alpha1.BundlesRef{},
bundlesRef2: nil,
want: false,
},
{
testName: "diff APIVersion",
bundlesRef1: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
bundlesRef2: &v1alpha1.BundlesRef{
APIVersion: "v2",
Name: "bundles-1",
Namespace: "eksa-system",
},
want: false,
},
{
testName: "diff Name",
bundlesRef1: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
bundlesRef2: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-2",
Namespace: "eksa-system",
},
want: false,
},
{
testName: "diff Namespace",
bundlesRef1: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
bundlesRef2: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "default",
},
want: false,
},
{
testName: "everything different",
bundlesRef1: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
bundlesRef2: &v1alpha1.BundlesRef{
APIVersion: "v2",
Name: "bundles-2",
Namespace: "default",
},
want: false,
},
{
testName: "equal",
bundlesRef1: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
bundlesRef2: &v1alpha1.BundlesRef{
APIVersion: "v1",
Name: "bundles-1",
Namespace: "eksa-system",
},
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.bundlesRef1.Equal(tt.bundlesRef2)).To(Equal(tt.want))
})
}
}
func setSelfManaged(c *v1alpha1.Cluster, s bool) {
if s {
c.SetSelfManaged()
} else {
c.SetManagedBy("management-cluster")
}
}
func TestNodes_Equal(t *testing.T) {
tests := []struct {
name string
nodes1, nodes2 *v1alpha1.Nodes
want bool
}{
{
name: "one nil",
nodes1: nil,
nodes2: &v1alpha1.Nodes{},
want: false,
},
{
name: "other nil",
nodes1: &v1alpha1.Nodes{},
nodes2: nil,
want: false,
},
{
name: "both nil",
nodes1: nil,
nodes2: nil,
want: true,
},
{
name: "one nil CIDRMasK",
nodes1: &v1alpha1.Nodes{},
nodes2: &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(2),
},
want: false,
},
{
name: "both nil CIDRMasK",
nodes1: &v1alpha1.Nodes{},
nodes2: &v1alpha1.Nodes{},
want: true,
},
{
name: "different not nil CIDRMasK",
nodes1: &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(3),
},
nodes2: &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(2),
},
want: false,
},
{
name: "equal not nil CIDRMasK",
nodes1: &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(2),
},
nodes2: &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(2),
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.nodes1.Equal(tt.nodes2)).To(Equal(tt.want))
})
}
}
func TestClusterHasAWSIamConfig(t *testing.T) {
tests := []struct {
name string
cluster *v1alpha1.Cluster
want bool
}{
{
name: "has AWSIamConfig",
cluster: &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
},
Spec: v1alpha1.ClusterSpec{
IdentityProviderRefs: []v1alpha1.Ref{
{
Name: "aws-config",
Kind: "AWSIamConfig",
},
},
},
},
want: true,
},
{
name: "no AWSIamConfig",
cluster: &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
},
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster.HasAWSIamConfig()).To(Equal(tt.want))
})
}
}
func TestPackageConfiguration_Equal(t *testing.T) {
same := &v1alpha1.PackageConfiguration{Disable: false}
tests := []struct {
name string
pcn, pco *v1alpha1.PackageConfiguration
want bool
}{
{
name: "one nil",
pcn: &v1alpha1.PackageConfiguration{},
pco: nil,
want: false,
},
{
name: "other nil",
pcn: nil,
pco: &v1alpha1.PackageConfiguration{},
want: false,
},
{
name: "both nil",
pcn: nil,
pco: nil,
want: true,
},
{
name: "equal",
pcn: &v1alpha1.PackageConfiguration{Disable: true},
pco: &v1alpha1.PackageConfiguration{Disable: true},
want: true,
},
{
name: "not equal",
pcn: &v1alpha1.PackageConfiguration{Disable: true},
pco: &v1alpha1.PackageConfiguration{Disable: false},
want: false,
},
{
name: "not equal controller",
pcn: &v1alpha1.PackageConfiguration{
Disable: true,
Controller: &v1alpha1.PackageControllerConfiguration{
Tag: "v1",
},
},
pco: &v1alpha1.PackageConfiguration{
Disable: false,
Controller: &v1alpha1.PackageControllerConfiguration{
Tag: "v2",
},
},
want: false,
},
{
name: "equal controller",
pcn: &v1alpha1.PackageConfiguration{
Controller: &v1alpha1.PackageControllerConfiguration{
Tag: "v1",
},
},
pco: &v1alpha1.PackageConfiguration{
Controller: &v1alpha1.PackageControllerConfiguration{
Tag: "v1",
},
},
want: true,
},
{
name: "not equal cronjob",
pcn: &v1alpha1.PackageConfiguration{
Disable: true,
CronJob: &v1alpha1.PackageControllerCronJob{
Tag: "v1",
},
},
pco: &v1alpha1.PackageConfiguration{
Disable: false,
CronJob: &v1alpha1.PackageControllerCronJob{
Tag: "v2",
},
},
want: false,
},
{
name: "equal cronjob",
pcn: &v1alpha1.PackageConfiguration{
CronJob: &v1alpha1.PackageControllerCronJob{
Tag: "v1",
},
},
pco: &v1alpha1.PackageConfiguration{
CronJob: &v1alpha1.PackageControllerCronJob{
Tag: "v1",
},
},
want: true,
},
{
name: "same",
pcn: same,
pco: same,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.pcn.Equal(tt.pco)).To(Equal(tt.want))
})
}
}
func TestPackageControllerConfiguration_Equal(t *testing.T) {
same := &v1alpha1.PackageControllerConfiguration{Tag: "v1"}
tests := []struct {
name string
pcn, pco *v1alpha1.PackageControllerConfiguration
want bool
}{
{
name: "one nil",
pcn: &v1alpha1.PackageControllerConfiguration{},
pco: nil,
want: false,
},
{
name: "other nil",
pcn: nil,
pco: &v1alpha1.PackageControllerConfiguration{},
want: false,
},
{
name: "both nil",
pcn: nil,
pco: nil,
want: true,
},
{
name: "equal Repository",
pcn: &v1alpha1.PackageControllerConfiguration{Repository: "a"},
pco: &v1alpha1.PackageControllerConfiguration{Repository: "a"},
want: true,
},
{
name: "not equal Repository",
pcn: &v1alpha1.PackageControllerConfiguration{Repository: "a"},
pco: &v1alpha1.PackageControllerConfiguration{Repository: "b"},
want: false,
},
{
name: "equal Tag",
pcn: &v1alpha1.PackageControllerConfiguration{Tag: "v1"},
pco: &v1alpha1.PackageControllerConfiguration{Tag: "v1"},
want: true,
},
{
name: "not equal Tag",
pcn: &v1alpha1.PackageControllerConfiguration{Tag: "v1"},
pco: &v1alpha1.PackageControllerConfiguration{Tag: "v2"},
want: false,
},
{
name: "equal Digest",
pcn: &v1alpha1.PackageControllerConfiguration{Digest: "a"},
pco: &v1alpha1.PackageControllerConfiguration{Digest: "a"},
want: true,
},
{
name: "not equal Digest",
pcn: &v1alpha1.PackageControllerConfiguration{Digest: "a"},
pco: &v1alpha1.PackageControllerConfiguration{Digest: "b"},
want: false,
},
{
name: "equal DisableWebhooks",
pcn: &v1alpha1.PackageControllerConfiguration{DisableWebhooks: true},
pco: &v1alpha1.PackageControllerConfiguration{DisableWebhooks: true},
want: true,
},
{
name: "not equal DisableWebhooks",
pcn: &v1alpha1.PackageControllerConfiguration{DisableWebhooks: true},
pco: &v1alpha1.PackageControllerConfiguration{},
want: false,
},
{
name: "equal Env",
pcn: &v1alpha1.PackageControllerConfiguration{Env: []string{"a"}},
pco: &v1alpha1.PackageControllerConfiguration{Env: []string{"a"}},
want: true,
},
{
name: "not equal Env",
pcn: &v1alpha1.PackageControllerConfiguration{Env: []string{"a"}},
pco: &v1alpha1.PackageControllerConfiguration{Env: []string{"b"}},
want: false,
},
{
name: "not equal Resources",
pcn: &v1alpha1.PackageControllerConfiguration{
Resources: v1alpha1.PackageControllerResources{
Requests: v1alpha1.ImageResource{
CPU: "1",
},
},
},
pco: &v1alpha1.PackageControllerConfiguration{
Resources: v1alpha1.PackageControllerResources{
Requests: v1alpha1.ImageResource{
CPU: "2",
},
},
},
want: false,
},
{
name: "same",
pcn: same,
pco: same,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.pcn.Equal(tt.pco)).To(Equal(tt.want))
})
}
}
func TestPackageControllerResources_Equal(t *testing.T) {
same := &v1alpha1.PackageControllerResources{
Limits: v1alpha1.ImageResource{
CPU: "3",
},
}
tests := []struct {
name string
pcn, pco *v1alpha1.PackageControllerResources
want bool
}{
{
name: "one nil",
pcn: &v1alpha1.PackageControllerResources{},
pco: nil,
want: false,
},
{
name: "other nil",
pcn: nil,
pco: &v1alpha1.PackageControllerResources{},
want: false,
},
{
name: "both nil",
pcn: nil,
pco: nil,
want: true,
},
{
name: "equal Requests",
pcn: &v1alpha1.PackageControllerResources{
Requests: v1alpha1.ImageResource{
CPU: "1",
},
},
pco: &v1alpha1.PackageControllerResources{
Requests: v1alpha1.ImageResource{
CPU: "1",
},
},
want: true,
},
{
name: "not equal Requests",
pcn: &v1alpha1.PackageControllerResources{
Requests: v1alpha1.ImageResource{
CPU: "1",
},
},
pco: &v1alpha1.PackageControllerResources{
Requests: v1alpha1.ImageResource{
CPU: "2",
},
},
want: false,
},
{
name: "equal Limits",
pcn: &v1alpha1.PackageControllerResources{
Limits: v1alpha1.ImageResource{
CPU: "1",
},
},
pco: &v1alpha1.PackageControllerResources{
Limits: v1alpha1.ImageResource{
CPU: "1",
},
},
want: true,
},
{
name: "not equal Limits",
pcn: &v1alpha1.PackageControllerResources{
Limits: v1alpha1.ImageResource{
CPU: "1",
},
},
pco: &v1alpha1.PackageControllerResources{
Limits: v1alpha1.ImageResource{
CPU: "2",
},
},
want: false,
},
{
name: "same",
pcn: same,
pco: same,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.pcn.Equal(tt.pco)).To(Equal(tt.want))
})
}
}
func TestImageResource_Equal(t *testing.T) {
same := &v1alpha1.ImageResource{
CPU: "3",
}
tests := []struct {
name string
pcn, pco *v1alpha1.ImageResource
want bool
}{
{
name: "one nil",
pcn: &v1alpha1.ImageResource{},
pco: nil,
want: false,
},
{
name: "other nil",
pcn: nil,
pco: &v1alpha1.ImageResource{},
want: false,
},
{
name: "both nil",
pcn: nil,
pco: nil,
want: true,
},
{
name: "equal CPU",
pcn: &v1alpha1.ImageResource{
CPU: "1",
},
pco: &v1alpha1.ImageResource{
CPU: "1",
},
want: true,
},
{
name: "not equal CPU",
pcn: &v1alpha1.ImageResource{
CPU: "1",
},
pco: &v1alpha1.ImageResource{
CPU: "2",
},
want: false,
},
{
name: "equal Memory",
pcn: &v1alpha1.ImageResource{
Memory: "1",
},
pco: &v1alpha1.ImageResource{
Memory: "1",
},
want: true,
},
{
name: "not equal Memory",
pcn: &v1alpha1.ImageResource{
Memory: "1",
},
pco: &v1alpha1.ImageResource{
Memory: "2",
},
want: false,
},
{
name: "same",
pcn: same,
pco: same,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.pcn.Equal(tt.pco)).To(Equal(tt.want))
})
}
}
func TestPackageControllerCronJob_Equal(t *testing.T) {
same := &v1alpha1.PackageControllerCronJob{
Repository: "3",
}
tests := []struct {
name string
pcn, pco *v1alpha1.PackageControllerCronJob
want bool
}{
{
name: "one nil",
pcn: &v1alpha1.PackageControllerCronJob{},
pco: nil,
want: false,
},
{
name: "other nil",
pcn: nil,
pco: &v1alpha1.PackageControllerCronJob{},
want: false,
},
{
name: "both nil",
pcn: nil,
pco: nil,
want: true,
},
{
name: "equal Repository",
pcn: &v1alpha1.PackageControllerCronJob{
Repository: "1",
},
pco: &v1alpha1.PackageControllerCronJob{
Repository: "1",
},
want: true,
},
{
name: "not equal Repository",
pcn: &v1alpha1.PackageControllerCronJob{
Repository: "1",
},
pco: &v1alpha1.PackageControllerCronJob{
Repository: "2",
},
want: false,
},
{
name: "equal Tag",
pcn: &v1alpha1.PackageControllerCronJob{
Tag: "1",
},
pco: &v1alpha1.PackageControllerCronJob{
Tag: "1",
},
want: true,
},
{
name: "not equal Tag",
pcn: &v1alpha1.PackageControllerCronJob{
Tag: "1",
},
pco: &v1alpha1.PackageControllerCronJob{
Tag: "2",
},
want: false,
},
{
name: "equal Digest",
pcn: &v1alpha1.PackageControllerCronJob{
Digest: "1",
},
pco: &v1alpha1.PackageControllerCronJob{
Digest: "1",
},
want: true,
},
{
name: "not equal Digest",
pcn: &v1alpha1.PackageControllerCronJob{
Digest: "1",
},
pco: &v1alpha1.PackageControllerCronJob{
Digest: "2",
},
want: false,
},
{
name: "equal Disable",
pcn: &v1alpha1.PackageControllerCronJob{
Disable: true,
},
pco: &v1alpha1.PackageControllerCronJob{
Disable: true,
},
want: true,
},
{
name: "not equal Disable",
pcn: &v1alpha1.PackageControllerCronJob{
Disable: true,
},
pco: &v1alpha1.PackageControllerCronJob{
Disable: false,
},
want: false,
},
{
name: "same",
pcn: same,
pco: same,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.pcn.Equal(tt.pco)).To(Equal(tt.want))
})
}
}
func TestCiliumConfigEquality(t *testing.T) {
tests := []struct {
Name string
A *v1alpha1.CiliumConfig
B *v1alpha1.CiliumConfig
Equal bool
}{
{
Name: "Nils",
A: nil,
B: nil,
Equal: true,
},
{
Name: "NilA",
A: nil,
B: &v1alpha1.CiliumConfig{},
Equal: false,
},
{
Name: "NilB",
A: &v1alpha1.CiliumConfig{},
B: nil,
Equal: false,
},
{
Name: "ZeroValues",
A: &v1alpha1.CiliumConfig{},
B: &v1alpha1.CiliumConfig{},
Equal: true,
},
{
Name: "EqualPolicyEnforcement",
A: &v1alpha1.CiliumConfig{
PolicyEnforcementMode: "always",
},
B: &v1alpha1.CiliumConfig{
PolicyEnforcementMode: "always",
},
Equal: true,
},
{
Name: "DiffPolicyEnforcement",
A: &v1alpha1.CiliumConfig{
PolicyEnforcementMode: "always",
},
B: &v1alpha1.CiliumConfig{
PolicyEnforcementMode: "default",
},
Equal: false,
},
{
Name: "NilSkipUpgradeAFalse",
A: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
B: &v1alpha1.CiliumConfig{},
Equal: true,
},
{
Name: "NilSkipUpgradeBFalse",
A: &v1alpha1.CiliumConfig{},
B: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
Equal: true,
},
{
Name: "SkipUpgradeBothFalse",
A: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
B: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
Equal: true,
},
{
Name: "NilSkipUpgradeATrue",
A: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
B: &v1alpha1.CiliumConfig{},
Equal: false,
},
{
Name: "NilSkipUpgradeBTrue",
A: &v1alpha1.CiliumConfig{},
B: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
Equal: false,
},
{
Name: "SkipUpgradeBothTrue",
A: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
B: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
Equal: true,
},
{
Name: "SkipUpgradeAFalseBTrue",
A: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
B: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
Equal: false,
},
{
Name: "SkipUpgradeATrueBFalse",
A: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(true),
},
B: &v1alpha1.CiliumConfig{
SkipUpgrade: ptr.Bool(false),
},
Equal: false,
},
{
Name: "EqualEgressMasqueradeInterfaces",
A: &v1alpha1.CiliumConfig{
EgressMasqueradeInterfaces: "eth0",
},
B: &v1alpha1.CiliumConfig{
EgressMasqueradeInterfaces: "eth0",
},
Equal: true,
},
{
Name: "DiffEgressMasqueradeInterfaces",
A: &v1alpha1.CiliumConfig{
EgressMasqueradeInterfaces: "eth0",
},
B: &v1alpha1.CiliumConfig{
EgressMasqueradeInterfaces: "eth1",
},
Equal: false,
},
}
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tc.A.Equal(tc.B)).To(Equal(tc.Equal))
})
}
}
func TestKubeVersionToValidSemver(t *testing.T) {
type args struct {
kubeVersion v1alpha1.KubernetesVersion
}
tests := []struct {
name string
args args
want *semver.Version
wantErr error
}{
{
name: "convert kube 1.22",
args: args{
kubeVersion: v1alpha1.Kube122,
},
want: &semver.Version{
Major: 1,
Minor: 22,
Patch: 0,
},
wantErr: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := v1alpha1.KubeVersionToSemver(tt.args.kubeVersion)
if err != tt.wantErr {
t.Errorf("KubeVersionToSemver() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("KubeVersionToSemver() = %v, want %v", got, tt.want)
}
})
}
}
func TestClusterIsSingleNode(t *testing.T) {
testCases := []struct {
testName string
cluster *v1alpha1.Cluster
want bool
}{
{
testName: "cluster with single node",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 1,
},
},
},
want: true,
},
{
testName: "cluster with cp and worker",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 1,
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(3),
},
},
},
},
want: false,
},
{
testName: "cluster with multiple cp",
cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 3,
},
},
},
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cluster.IsSingleNode()).To(Equal(tt.want))
})
}
}
| 2,814 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"fmt"
"reflect"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/version"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"github.com/aws/eks-anywhere/pkg/features"
)
// log is for logging in this package.
var clusterlog = logf.Log.WithName("cluster-resource")
func (r *Cluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
//+kubebuilder:webhook:path=/mutate-anywhere-eks-amazonaws-com-v1alpha1-cluster,mutating=true,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=clusters,verbs=create;update,versions=v1alpha1,name=mutation.cluster.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Defaulter = &Cluster{}
// Default implements webhook.Defaulter so a webhook will be registered for the type.
func (r *Cluster) Default() {
clusterlog.Info("Setting up Cluster defaults", "name", r.Name, "namespace", r.Namespace)
r.SetDefaults()
}
// Change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-cluster,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=clusters,verbs=create;update,versions=v1alpha1,name=validation.cluster.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &Cluster{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *Cluster) ValidateCreate() error {
clusterlog.Info("validate create", "name", r.Name)
var allErrs field.ErrorList
if !r.IsReconcilePaused() {
if r.IsSelfManaged() {
return apierrors.NewBadRequest("creating new cluster on existing cluster is not supported for self managed clusters")
} else if !features.IsActive(features.FullLifecycleAPI()) {
return apierrors.NewBadRequest("creating new managed cluster on existing cluster is not supported")
}
}
if err := r.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), r.Spec, err.Error()))
}
if len(allErrs) != 0 {
return apierrors.NewInvalid(GroupVersion.WithKind(ClusterKind).GroupKind(), r.Name, allErrs)
}
return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *Cluster) ValidateUpdate(old runtime.Object) error {
clusterlog.Info("validate update", "name", r.Name)
oldCluster, ok := old.(*Cluster)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a Cluster but got a %T", old))
}
var allErrs field.ErrorList
if r.Spec.DatacenterRef.Kind == TinkerbellDatacenterKind {
allErrs = append(allErrs, validateUpgradeRequestTinkerbell(r, oldCluster)...)
}
allErrs = append(allErrs, validateImmutableFieldsCluster(r, oldCluster)...)
allErrs = append(allErrs, validateBundlesRefCluster(r, oldCluster)...)
allErrs = append(allErrs, ValidateKubernetesVersionSkew(r, oldCluster)...)
if len(allErrs) != 0 {
return apierrors.NewInvalid(GroupVersion.WithKind(ClusterKind).GroupKind(), r.Name, allErrs)
}
if err := r.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), r.Spec, err.Error()))
}
if len(allErrs) != 0 {
return apierrors.NewInvalid(GroupVersion.WithKind(ClusterKind).GroupKind(), r.Name, allErrs)
}
return nil
}
func validateBundlesRefCluster(new, old *Cluster) field.ErrorList {
var allErrs field.ErrorList
bundlesRefPath := field.NewPath("spec").Child("BundlesRef")
if old.Spec.BundlesRef != nil && new.Spec.BundlesRef == nil {
allErrs = append(
allErrs,
field.Invalid(bundlesRefPath, new.Spec.BundlesRef, fmt.Sprintf("field cannot be removed after setting. Previous value %v", old.Spec.BundlesRef)))
}
return allErrs
}
func validateUpgradeRequestTinkerbell(new, old *Cluster) field.ErrorList {
var allErrs field.ErrorList
path := field.NewPath("spec")
if old.Spec.KubernetesVersion != new.Spec.KubernetesVersion {
if old.Spec.ControlPlaneConfiguration.Count != new.Spec.ControlPlaneConfiguration.Count {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.ControlPlaneConfiguration, fmt.Sprintf("cannot perform scale up or down during rolling upgrades. Previous control plane node count %v", old.Spec.ControlPlaneConfiguration.Count)))
}
if len(old.Spec.WorkerNodeGroupConfigurations) != len(new.Spec.WorkerNodeGroupConfigurations) {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.WorkerNodeGroupConfigurations, "cannot perform scale up or down during rolling upgrades. Please revert to the previous worker node groups."))
}
workerNodeGroupMap := make(map[string]*WorkerNodeGroupConfiguration)
for i := range old.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMap[old.Spec.WorkerNodeGroupConfigurations[i].Name] = &old.Spec.WorkerNodeGroupConfigurations[i]
}
for _, nodeGroupNewSpec := range new.Spec.WorkerNodeGroupConfigurations {
workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]
if ok && *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.WorkerNodeGroupConfigurations, fmt.Sprintf("cannot perform scale up or down during rolling upgrades. Previous worker node count %v", *workerNodeGrpOldSpec.Count)))
}
if !ok {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.WorkerNodeGroupConfigurations, fmt.Sprintf("cannot perform scale up or down during rolling upgrades. Please remove the new worker node group %s", nodeGroupNewSpec.Name)))
}
}
}
return allErrs
}
func validateImmutableFieldsCluster(new, old *Cluster) field.ErrorList {
if old.IsReconcilePaused() {
return nil
}
var allErrs field.ErrorList
specPath := field.NewPath("spec")
if !old.ManagementClusterEqual(new) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("managementCluster", new.Spec.ManagementCluster.Name), fmt.Sprintf("field is immutable %v", new.Spec.ManagementCluster)))
}
if !new.Spec.DatacenterRef.Equal(&old.Spec.DatacenterRef) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("datacenterRef"), fmt.Sprintf("field is immutable %v", new.Spec.DatacenterRef)))
}
if !new.Spec.ControlPlaneConfiguration.Endpoint.Equal(old.Spec.ControlPlaneConfiguration.Endpoint, new.Spec.DatacenterRef.Kind) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("ControlPlaneConfiguration.endpoint"), fmt.Sprintf("field is immutable %v", new.Spec.ControlPlaneConfiguration.Endpoint)))
}
if !new.Spec.ClusterNetwork.Pods.Equal(&old.Spec.ClusterNetwork.Pods) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("clusterNetwork", "pods"), "field is immutable"))
}
if !new.Spec.ClusterNetwork.Services.Equal(&old.Spec.ClusterNetwork.Services) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("clusterNetwork", "services"), "field is immutable"))
}
if !new.Spec.ClusterNetwork.DNS.Equal(&old.Spec.ClusterNetwork.DNS) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("clusterNetwork", "dns"), "field is immutable"))
}
// We don't want users to be able to toggle off SkipUpgrade until we've understood the
// implications so we are temporarily disallowing it.
oCNI := old.Spec.ClusterNetwork.CNIConfig
nCNI := new.Spec.ClusterNetwork.CNIConfig
if oCNI != nil && oCNI.Cilium != nil && !oCNI.Cilium.IsManaged() && nCNI.Cilium.IsManaged() {
allErrs = append(
allErrs,
field.Forbidden(
specPath.Child("clusterNetwork", "cniConfig", "cilium", "skipUpgrade"),
"cannot toggle off skipUpgrade once enabled",
),
)
}
if !new.Spec.ClusterNetwork.Nodes.Equal(old.Spec.ClusterNetwork.Nodes) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("clusterNetwork", "nodes"), "field is immutable"))
}
if !new.Spec.ProxyConfiguration.Equal(old.Spec.ProxyConfiguration) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("ProxyConfiguration"), fmt.Sprintf("field is immutable %v", new.Spec.ProxyConfiguration)))
}
if new.Spec.ExternalEtcdConfiguration != nil && old.Spec.ExternalEtcdConfiguration == nil {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("externalEtcdConfiguration"), "cannot switch from local to external etcd topology"),
)
}
if new.Spec.ExternalEtcdConfiguration != nil && old.Spec.ExternalEtcdConfiguration != nil {
if old.Spec.ExternalEtcdConfiguration.Count != new.Spec.ExternalEtcdConfiguration.Count {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("externalEtcdConfiguration.count"), fmt.Sprintf("field is immutable %v", new.Spec.ExternalEtcdConfiguration.Count)),
)
}
}
if !new.Spec.GitOpsRef.Equal(old.Spec.GitOpsRef) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("GitOpsRef"), fmt.Sprintf("field is immutable %v", new.Spec.GitOpsRef)))
}
if new.Spec.DatacenterRef.Kind == TinkerbellDatacenterKind {
if !reflect.DeepEqual(new.Spec.ControlPlaneConfiguration.Labels, old.Spec.ControlPlaneConfiguration.Labels) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("ControlPlaneConfiguration.labels"), fmt.Sprintf("field is immutable %v", new.Spec.ControlPlaneConfiguration.Labels)))
}
if !reflect.DeepEqual(new.Spec.ControlPlaneConfiguration.Taints, old.Spec.ControlPlaneConfiguration.Taints) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("ControlPlaneConfiguration.taints"), fmt.Sprintf("field is immutable %v", new.Spec.ControlPlaneConfiguration.Taints)))
}
workerNodeGroupMap := make(map[string]*WorkerNodeGroupConfiguration)
for i := range old.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMap[old.Spec.WorkerNodeGroupConfigurations[i].Name] = &old.Spec.WorkerNodeGroupConfigurations[i]
}
for _, nodeGroupNewSpec := range new.Spec.WorkerNodeGroupConfigurations {
if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
if !reflect.DeepEqual(workerNodeGrpOldSpec.Labels, nodeGroupNewSpec.Labels) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("WorkerNodeConfiguration.labels"), fmt.Sprintf("field is immutable %v", nodeGroupNewSpec.Labels)))
}
if !reflect.DeepEqual(workerNodeGrpOldSpec.Taints, nodeGroupNewSpec.Taints) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("WorkerNodeConfiguration.taints"), fmt.Sprintf("field is immutable %v", nodeGroupNewSpec.Taints)))
}
}
}
}
if !old.IsSelfManaged() || features.IsActive(features.ExperimentalSelfManagedClusterUpgrade()) {
oldAWSIamConfig, newAWSIamConfig := &Ref{}, &Ref{}
for _, identityProvider := range new.Spec.IdentityProviderRefs {
if identityProvider.Kind == AWSIamConfigKind {
newAWSIamConfig = &identityProvider
break
}
}
for _, identityProvider := range old.Spec.IdentityProviderRefs {
if identityProvider.Kind == AWSIamConfigKind {
oldAWSIamConfig = &identityProvider
break
}
}
if !oldAWSIamConfig.Equal(newAWSIamConfig) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("identityProviderRefs", AWSIamConfigKind), fmt.Sprintf("field is immutable %v", newAWSIamConfig.Kind)))
}
return allErrs
}
clusterlog.Info("Cluster config is associated with management cluster", "name", old.Name)
if !RefSliceEqual(new.Spec.IdentityProviderRefs, old.Spec.IdentityProviderRefs) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("IdentityProviderRefs"), fmt.Sprintf("field is immutable %v", new.Spec.IdentityProviderRefs)))
}
if old.Spec.KubernetesVersion != new.Spec.KubernetesVersion {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("kubernetesVersion"), fmt.Sprintf("field is immutable %v", new.Spec.KubernetesVersion)),
)
}
if !old.Spec.ControlPlaneConfiguration.Equal(&new.Spec.ControlPlaneConfiguration) {
allErrs = append(
allErrs,
field.Forbidden(specPath.Child("ControlPlaneConfiguration"), fmt.Sprintf("field is immutable %v", new.Spec.ControlPlaneConfiguration)))
}
return allErrs
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *Cluster) ValidateDelete() error {
clusterlog.Info("validate delete", "name", r.Name)
return nil
}
// ValidateKubernetesVersionSkew validates Kubernetes version skew between upgrades.
func ValidateKubernetesVersionSkew(new, old *Cluster) field.ErrorList {
var allErrs field.ErrorList
path := field.NewPath("spec")
oldVersion := old.Spec.KubernetesVersion
newVersion := new.Spec.KubernetesVersion
parsedOldVersion, err := version.ParseGeneric(string(oldVersion))
if err != nil {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.KubernetesVersion, fmt.Sprintf("parsing cluster version: %v", err.Error())))
return allErrs
}
parsedNewVersion, err := version.ParseGeneric(string(newVersion))
if err != nil {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.KubernetesVersion, fmt.Sprintf("parsing comparison version: %v", err.Error())))
return allErrs
}
if parsedNewVersion.Minor() == parsedOldVersion.Minor() && parsedNewVersion.Major() == parsedOldVersion.Major() {
return allErrs
}
if err := ValidateVersionSkew(parsedOldVersion, parsedNewVersion); err != nil {
allErrs = append(
allErrs,
field.Invalid(path, new.Spec.KubernetesVersion, err.Error()))
}
return allErrs
}
| 385 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
_ "k8s.io/apimachinery/pkg/runtime"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestClusterDefault(t *testing.T) {
cOld := &v1alpha1.Cluster{}
cOld.SetSelfManaged()
cOld.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{
Port: "",
}
cOld.Default()
g := NewWithT(t)
g.Expect(cOld.Spec.ClusterNetwork.CNIConfig).To(Equal(&v1alpha1.CNIConfig{}))
g.Expect(cOld.Spec.RegistryMirrorConfiguration.Port).To(Equal(constants.DefaultHttpsPort))
}
func TestClusterValidateUpdateManagementValueMutableExperimental(t *testing.T) {
features.ClearCache()
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.Labels = map[string]string{"Key1": "Val1"}
cOld.Spec.ControlPlaneConfiguration.Taints = []v1.Taint{
{
Key: "Key1",
Value: "Val1",
Effect: "PreferNoSchedule",
},
}
cOld.SetSelfManaged()
cNew := cOld.DeepCopy()
cNew.Spec.ControlPlaneConfiguration.Labels = map[string]string{"Key2": "Val2"}
cNew.Spec.ControlPlaneConfiguration.Taints = []v1.Taint{
{
Key: "Key2",
Value: "Val2",
Effect: "PreferNoSchedule",
},
}
cNew.Spec.ControlPlaneConfiguration.Count = 1
cNew.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "test"
g := NewWithT(t)
g.Expect(cNew.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateManagementValueImmutable(t *testing.T) {
cOld := baseCluster()
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.SetManagedBy("management-cluster")
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("field is immutable")))
}
func TestClusterValidateUpdateManagementOldNilNewTrueSuccess(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
c.SetSelfManaged()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateManagementOldNilNewFalseImmutable(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("")
c := cOld.DeepCopy()
c.SetManagedBy("management-cluster")
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("field is immutable")))
}
func TestClusterValidateUpdateManagementBothNilImmutable(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestManagementClusterValidateUpdateKubernetesVersionImmutable(t *testing.T) {
features.ClearCache()
cOld := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube119,
ExternalEtcdConfiguration: &v1alpha1.ExternalEtcdConfiguration{Count: 3},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 3, Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
},
},
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.KubernetesVersion = v1alpha1.Kube120
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("field is immutable 1.20")))
}
func TestManagementNilClusterValidateUpdateKubernetesVersionImmutable(t *testing.T) {
cOld := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube122,
ExternalEtcdConfiguration: &v1alpha1.ExternalEtcdConfiguration{Count: 3},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 3, Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
},
},
}
c := cOld.DeepCopy()
c.Spec.KubernetesVersion = v1alpha1.Kube120
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("field is immutable 1.20")))
}
func TestWorkloadClusterValidateUpdateKubernetesVersionSuccess(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.KubernetesVersion = v1alpha1.Kube122
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestWorkloadClusterValidateUpdateNoUpdateSuccess(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestManagementClusterValidateUpdateControlPlaneConfigurationEqual(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestWorkloadClusterValidateUpdateControlPlaneConfigurationEqual(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateControlPlaneConfigurationImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 10,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/2"},
MachineGroupRef: &v1alpha1.Ref{Name: "test2", Kind: "SecondMachineConfig"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.endpoint: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateControlPlaneConfigurationOldEndpointImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
}
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/2"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.endpoint: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateControlPlaneConfigurationOldEndpointNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: nil,
}
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.endpoint: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateControlPlaneConfigurationNewEndpointNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
}
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: nil,
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.endpoint: Forbidden: field is immutable")))
}
func TestCloudStackClusterValidateUpdateControlPlaneConfigurationOldDefaultPortNewNoPort(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "1.1.1.1:6443"}
cOld.Spec.DatacenterRef.Kind = v1alpha1.CloudStackDatacenterKind
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "1.1.1.1"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestCloudStackClusterValidateUpdateControlPlaneConfigurationOldNoPortNewDefaultPort(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "1.1.1.1"}
cOld.Spec.DatacenterRef.Kind = v1alpha1.CloudStackDatacenterKind
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "1.1.1.1:6443"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestCloudStackClusterValidateUpdateControlPlaneConfigurationOldPortImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "1.1.1.1"}
cOld.Spec.DatacenterRef.Kind = v1alpha1.CloudStackDatacenterKind
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "1.1.1.2"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.endpoint: Forbidden: field is immutable")))
}
func TestManagementClusterValidateUpdateControlPlaneConfigurationTaintsImmutable(t *testing.T) {
features.ClearCache()
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "false")
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Taints: []v1.Taint{
{
Key: "Key1",
Value: "Val1",
Effect: "PreferNoSchedule",
},
},
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Taints: []v1.Taint{
{
Key: "Key2",
Value: "Val2",
Effect: "PreferNoSchedule",
},
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration: Forbidden: field is immutable")))
}
func TestManagementClusterValidateUpdateControlPlaneConfigurationLabelsImmutable(t *testing.T) {
features.ClearCache()
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "false")
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Labels: map[string]string{
"Key1": "Val1",
},
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Labels: map[string]string{
"Key2": "Val2",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration: Forbidden: field is immutable")))
}
func TestManagementClusterValidateUpdateControlPlaneConfigurationOldMachineGroupRefImmutable(t *testing.T) {
features.ClearCache()
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "false")
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{Name: "test1", Kind: "MachineConfig"},
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{Name: "test2", Kind: "MachineConfig"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration: Forbidden: field is immutable")))
}
func TestWorkloadClusterValidateUpdateControlPlaneConfigurationMachineGroupRef(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "test1", Kind: "MachineConfig"}
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "test2", Kind: "MachineConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestManagementClusterValidateUpdateControlPlaneConfigurationOldMachineGroupRefNilImmutable(t *testing.T) {
features.ClearCache()
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "false")
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: nil,
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration: Forbidden: field is immutable")))
}
func TestWorkloadClusterValidateUpdateControlPlaneConfigurationOldMachineGroupRefNilSuccess(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.MachineGroupRef = nil
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestManagementClusterValidateUpdateControlPlaneConfigurationNewMachineGroupRefNilImmutable(t *testing.T) {
features.ClearCache()
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "false")
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
}
cOld.SetSelfManaged()
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: nil,
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration: Forbidden: field is immutable")))
}
func TestWorkloadClusterValidateUpdateControlPlaneConfigurationNewMachineGroupRefChangedSuccess(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"}
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.MachineGroupRef = &v1alpha1.Ref{Name: "test-2", Kind: "MachineConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestWorkloadClusterValidateUpdateControlPlaneConfigurationNewMachineGroupRefNilError(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.MachineGroupRef = nil
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("must specify machineGroupRef control plane machines")))
}
func TestWorkloadClusterValidateUpdateWorkerNodeConfigurationNewMachineGroupRefNilError(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef = nil
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("must specify machineGroupRef for worker nodes")))
}
func TestWorkloadClusterValidateUpdateExternalEtcdConfigurationNewMachineGroupRefNilError(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
MachineGroupRef: &v1alpha1.Ref{Name: "test", Kind: "MachineConfig"},
Count: 3,
}
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
MachineGroupRef: nil,
Count: 3,
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("must specify machineGroupRef for etcd machines")))
}
func TestClusterValidateUpdateDatacenterRefImmutableEqual(t *testing.T) {
cOld := baseCluster()
cOld.Spec.DatacenterRef = v1alpha1.Ref{
Name: "test", Kind: "DatacenterConfig",
}
c := cOld.DeepCopy()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateDatacenterRefImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.DatacenterRef = v1alpha1.Ref{
Name: "test", Kind: "DatacenterConfig",
}
c := cOld.DeepCopy()
c.Spec.DatacenterRef = v1alpha1.Ref{Name: "test2", Kind: "SecondDatacenterConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.datacenterRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateDatacenterRefImmutableName(t *testing.T) {
cOld := baseCluster()
cOld.Spec.DatacenterRef = v1alpha1.Ref{
Name: "test", Kind: "DatacenterConfig",
}
c := cOld.DeepCopy()
c.Spec.DatacenterRef = v1alpha1.Ref{Name: "test2", Kind: "DatacenterConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.datacenterRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateDatacenterRefNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.DatacenterRef = v1alpha1.Ref{
Name: "test", Kind: "DatacenterConfig",
}
c := cOld.DeepCopy()
c.Spec.DatacenterRef = v1alpha1.Ref{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.datacenterRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateExternalEtcdReplicasImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3}
cOld.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
}
c := cOld.DeepCopy()
c.Spec.ExternalEtcdConfiguration.Count = 5
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.externalEtcdConfiguration.count: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateDataCenterRefNameImmutable(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.DatacenterRef.Name = "FancyNewDataCenter"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.datacenterRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateDataCenterRefKindImmutable(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.DatacenterRef.Name = v1alpha1.DockerDatacenterKind
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.datacenterRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateClusterNetworkPodsImmutable(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"1.2.3.4/5"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.clusterNetwork.pods: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateClusterNetworkServicesImmutable(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.ClusterNetwork.Services.CidrBlocks = []string{"1.2.3.4/9", "1.2.3.4/10"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.clusterNetwork.services: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateClusterNetworkDNSImmutable(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ClusterNetwork = v1alpha1.ClusterNetwork{
DNS: v1alpha1.DNS{
ResolvConf: &v1alpha1.ResolvConf{
Path: "my-path",
},
},
}
c := cOld.DeepCopy()
c.Spec.ClusterNetwork.DNS.ResolvConf.Path = "other-path"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.clusterNetwork.dns: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateClusterNetworkNodesImmutable(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.ClusterNetwork.Nodes = &v1alpha1.Nodes{
CIDRMaskSize: ptr.Int(10),
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.clusterNetwork.nodes: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateProxyConfigurationEqualOrder(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "http://test.com:1",
HttpsProxy: "https://test.com:1",
NoProxy: []string{
"noproxy1",
"noproxy2",
},
}
c := cOld.DeepCopy()
c.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "http://test.com:1",
HttpsProxy: "https://test.com:1",
NoProxy: []string{
"noproxy2",
"noproxy1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateProxyConfigurationImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "http://test.com",
HttpsProxy: "https://test.com",
NoProxy: []string{"noproxy1"},
}
c := cOld.DeepCopy()
c.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "http://test.com",
HttpsProxy: "https://test.com",
NoProxy: []string{"noproxy1", "noproxy2"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ProxyConfiguration: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateProxyConfigurationNoProxyImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
}
c := cOld.DeepCopy()
c.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{"noproxy"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ProxyConfiguration: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateProxyConfigurationOldNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ProxyConfiguration = nil
c := cOld.DeepCopy()
c.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{"noproxy"},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ProxyConfiguration: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateProxyConfigurationNewNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "httpproxy",
HttpsProxy: "httpsproxy",
NoProxy: []string{"noproxy"},
}
c := cOld.DeepCopy()
c.Spec.ProxyConfiguration = nil
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.ProxyConfiguration: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateGitOpsRefImmutableNilEqual(t *testing.T) {
cOld := baseCluster()
cOld.Spec.GitOpsRef = nil
c := cOld.DeepCopy()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateGitOpsRefImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.GitOpsRef = &v1alpha1.Ref{}
c := cOld.DeepCopy()
c.Spec.GitOpsRef = &v1alpha1.Ref{Name: "test2", Kind: "GitOpsConfig2"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.GitOpsRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateGitOpsRefImmutableName(t *testing.T) {
cOld := baseCluster()
cOld.Spec.GitOpsRef = &v1alpha1.Ref{
Name: "test1", Kind: "GitOpsConfig",
}
c := cOld.DeepCopy()
c.Spec.GitOpsRef = &v1alpha1.Ref{Name: "test2", Kind: "GitOpsConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.GitOpsRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateGitOpsRefImmutableKind(t *testing.T) {
cOld := baseCluster()
cOld.Spec.GitOpsRef = &v1alpha1.Ref{
Name: "test", Kind: "GitOpsConfig1",
}
c := cOld.DeepCopy()
c.Spec.GitOpsRef = &v1alpha1.Ref{Name: "test", Kind: "GitOpsConfig2"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.GitOpsRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateGitOpsRefOldNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.GitOpsRef = nil
c := cOld.DeepCopy()
c.Spec.GitOpsRef = &v1alpha1.Ref{Name: "test", Kind: "GitOpsConfig"}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.GitOpsRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateGitOpsRefNewNilImmutable(t *testing.T) {
cOld := baseCluster()
cOld.Spec.GitOpsRef = &v1alpha1.Ref{
Name: "test", Kind: "GitOpsConfig",
}
c := cOld.DeepCopy()
c.Spec.GitOpsRef = nil
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.GitOpsRef: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateAWSIamNameImmutableUpdateSameName(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateAWSIamNameImmutableUpdateSameNameWorkloadCluster(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("mgmt2")
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.SetManagedBy("mgmt2")
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateAWSIamNameImmutableUpdateName(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name2",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateAWSIamNameImmutableEmpty(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateAWSIamNameImmutableAddConfig(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
c := cOld.DeepCopy()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateUnsetBundlesRefImmutable(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.BundlesRef = nil
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.BundlesRef: Invalid value: \"null\": field cannot be removed after setting")))
}
func TestClusterValidateUpdateOIDCNameMutableUpdateNameWorkloadCluster(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.IdentityProviderRefs[0].Name = "name2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateOIDCNameMutableUpdateNameMgmtCluster(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
c := cOld.DeepCopy()
c.Spec.IdentityProviderRefs[0].Name = "name2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateOIDCNameMutableUpdateNameUnchanged(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
c := cOld.DeepCopy()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateOIDCNameMutableWorkloadCluster(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
cOld.SetManagedBy("mgmt2")
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
c.SetManagedBy("mgmt2")
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateOIDCNameMutableMgmtCluster(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateOIDCNameMutableAddConfigWorkloadCluster(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
cOld.SetManagedBy("mgmt2")
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
c.SetManagedBy("mgmt2")
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateOIDCNameMutableAddConfigMgmtCluster(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
c := cOld.DeepCopy()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateSwapIdentityProviders(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateSwapIdentityProvidersWorkloadCluster(t *testing.T) {
cOld := baseCluster()
cOld.SetManagedBy("mgmt2")
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
}
c := baseCluster()
c.SetManagedBy("mgmt2")
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.AWSIamConfigKind,
Name: "name1",
},
{
Kind: v1alpha1.OIDCConfigKind,
Name: "name1",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateEmptyIdentityProviders(t *testing.T) {
cOld := baseCluster()
cOld.Spec.IdentityProviderRefs = []v1alpha1.Ref{}
c := cOld.DeepCopy()
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateGitOpsRefOldEmptyImmutable(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: "identity",
Name: "name",
},
}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("spec.IdentityProviderRefs: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateWithPausedAnnotation(t *testing.T) {
cOld := baseCluster()
cOld.ObjectMeta.Annotations = make(map[string]string, 1)
cOld.PauseReconcile()
c := cOld.DeepCopy()
c.Spec.KubernetesVersion = v1alpha1.Kube122
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateInvalidType(t *testing.T) {
cOld := &v1alpha1.VSphereDatacenterConfig{}
c := &v1alpha1.Cluster{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("expected a Cluster but got a *v1alpha1.VSphereDatacenterConfig")))
}
func TestClusterValidateUpdateSuccess(t *testing.T) {
features.ClearCache()
workerConfiguration := append([]v1alpha1.WorkerNodeGroupConfiguration{}, v1alpha1.WorkerNodeGroupConfiguration{Count: ptr.Int(5), Name: "test", MachineGroupRef: &v1alpha1.Ref{Name: "ref-name"}})
cOld := baseCluster()
cOld.Spec.WorkerNodeGroupConfigurations = workerConfiguration
c := cOld.DeepCopy()
c.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(10)
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterCreateManagementCluster(t *testing.T) {
features.ClearCache()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
workerConfiguration := append([]v1alpha1.WorkerNodeGroupConfiguration{}, v1alpha1.WorkerNodeGroupConfiguration{Count: ptr.Int(5)})
cluster := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
WorkerNodeGroupConfigurations: workerConfiguration,
KubernetesVersion: v1alpha1.Kube119,
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 3, Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"},
},
ExternalEtcdConfiguration: &v1alpha1.ExternalEtcdConfiguration{Count: 3},
},
}
g := NewWithT(t)
g.Expect(cluster.ValidateCreate()).To(MatchError(ContainSubstring("creating new cluster on existing cluster is not supported for self managed clusters")))
}
func TestClusterCreateCloudStackMultipleWorkerNodeGroupsValidation(t *testing.T) {
features.ClearCache()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cluster := baseCluster()
cluster.Spec.WorkerNodeGroupConfigurations = append([]v1alpha1.WorkerNodeGroupConfiguration{},
v1alpha1.WorkerNodeGroupConfiguration{Count: ptr.Int(5), Name: "test", MachineGroupRef: &v1alpha1.Ref{Name: "ref-name"}},
v1alpha1.WorkerNodeGroupConfiguration{Count: ptr.Int(5), Name: "test2", MachineGroupRef: &v1alpha1.Ref{Name: "ref-name"}})
cluster.Spec.ManagementCluster.Name = "management-cluster"
g := NewWithT(t)
g.Expect(cluster.ValidateCreate()).To(Succeed())
}
func TestClusterCreateWorkloadCluster(t *testing.T) {
features.ClearCache()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cluster := baseCluster()
cluster.Spec.WorkerNodeGroupConfigurations = append([]v1alpha1.WorkerNodeGroupConfiguration{},
v1alpha1.WorkerNodeGroupConfiguration{
Count: ptr.Int(5),
Name: "md-0",
MachineGroupRef: &v1alpha1.Ref{
Name: "test",
},
})
cluster.Spec.KubernetesVersion = v1alpha1.Kube119
cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3, Endpoint: &v1alpha1.Endpoint{Host: "1.1.1.1/1"}, MachineGroupRef: &v1alpha1.Ref{Name: "test"},
}
cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3, MachineGroupRef: &v1alpha1.Ref{Name: "test"}}
cluster.Spec.ManagementCluster.Name = "management-cluster"
g := NewWithT(t)
g.Expect(cluster.ValidateCreate()).To(Succeed())
}
func TestClusterUpdateWorkerNodeGroupTaintsAndLabelsSuccess(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "test",
Taints: []v1.Taint{{
Key: "test",
Value: "test",
Effect: "PreferNoSchedule",
}},
Labels: map[string]string{
"test": "val1",
},
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Name: "test",
},
}}
c := cOld.DeepCopy()
c.Spec.WorkerNodeGroupConfigurations[0].Taints[0].Value = "test2"
c.Spec.WorkerNodeGroupConfigurations[0].Labels["test"] = "val2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterUpdateWorkerNodeGroupTaintsInvalid(t *testing.T) {
cOld := baseCluster()
cOld.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Count: ptr.Int(1),
Name: "test",
Taints: []v1.Taint{{
Key: "test",
Value: "test",
Effect: "PreferNoSchedule",
}},
MachineGroupRef: &v1alpha1.Ref{},
}}
c := cOld.DeepCopy()
c.Spec.WorkerNodeGroupConfigurations[0].Taints[0].Effect = "NoSchedule"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")))
}
func TestClusterUpdateWorkerNodeGroupNameInvalid(t *testing.T) {
cOld := baseCluster()
c := cOld.DeepCopy()
c.Spec.WorkerNodeGroupConfigurations[0].Name = ""
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("must specify name for worker nodes")))
}
func TestClusterUpdateWorkerNodeGroupLabelsInvalid(t *testing.T) {
cOld := baseCluster()
cOld.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Count: ptr.Int(1),
Name: "test",
Labels: map[string]string{
"test": "val1",
},
MachineGroupRef: &v1alpha1.Ref{},
}}
c := cOld.DeepCopy()
c.Spec.WorkerNodeGroupConfigurations[0].Labels["test"] = "val1/val2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(MatchError(ContainSubstring("labels for worker node group test not valid: found following errors with labels: spec.workerNodeGroupConfigurations[0].labels: Invalid value:")))
}
func TestClusterUpdateControlPlaneTaintsAndLabelsSuccess(t *testing.T) {
cOld := baseCluster()
cOld.Spec.ControlPlaneConfiguration.Taints = []v1.Taint{{
Key: "test",
Value: "test",
Effect: "PreferNoSchedule",
}}
cOld.Spec.ControlPlaneConfiguration.Labels = map[string]string{
"test": "val1",
}
cOld.SetManagedBy("management-cluster")
c := cOld.DeepCopy()
c.Spec.ControlPlaneConfiguration.Taints[0].Value = "test2"
c.Spec.ControlPlaneConfiguration.Labels["test"] = "val2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterUpdateControlPlaneLabelsInvalid(t *testing.T) {
cluster := baseCluster()
cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Labels: map[string]string{
"test": "val1",
},
Endpoint: &v1alpha1.Endpoint{"1.1.1.1"},
MachineGroupRef: &v1alpha1.Ref{},
Count: 1,
}
cluster.SetManagedBy("management-cluster")
c := cluster.DeepCopy()
c.Spec.ControlPlaneConfiguration.Labels["test"] = "val1/val2"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(cluster)).To(MatchError(ContainSubstring("spec.controlPlaneConfiguration.labels: Invalid value")))
}
func TestClusterValidateCreateSelfManagedUnpaused(t *testing.T) {
features.ClearCache()
cluster := baseCluster()
g := NewWithT(t)
cluster.SetSelfManaged()
err := cluster.ValidateCreate()
g.Expect(err).To(MatchError(ContainSubstring("creating new cluster on existing cluster is not supported for self managed clusters")))
}
func TestClusterValidateCreateManagedUnpaused(t *testing.T) {
features.ClearCache()
t.Setenv(features.FullLifecycleAPIEnvVar, "")
cluster := baseCluster()
g := NewWithT(t)
cluster.SetManagedBy("mgmt2")
err := cluster.ValidateCreate()
g.Expect(err.Error()).To(ContainSubstring("creating new managed cluster on existing cluster is not supported"))
}
func TestClusterValidateCreateSelfManagedNotPaused(t *testing.T) {
features.ClearCache()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cluster := baseCluster()
cluster.SetSelfManaged()
g := NewWithT(t)
err := cluster.ValidateCreate()
g.Expect(err).To(MatchError(ContainSubstring("creating new cluster on existing cluster is not supported for self managed clusters")))
}
func TestClusterValidateCreateInvalidCluster(t *testing.T) {
tests := []struct {
name string
featureGateEnabled bool
cluster *v1alpha1.Cluster
}{
{
name: "Paused self-managed cluster, feature gate off",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Paused workload cluster, feature gate off",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Paused self-managed cluster, feature gate on",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
}),
featureGateEnabled: true,
},
{
name: "Paused workload cluster, feature gate on",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: true,
},
{
name: "Unpaused workload cluster, feature gate on",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
}),
featureGateEnabled: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
features.ClearCache()
if tt.featureGateEnabled {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
}
// Invalid control plane configuration
tt.cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{Endpoint: &v1alpha1.Endpoint{Host: "test-ip"}, MachineGroupRef: &v1alpha1.Ref{Name: "test"}}
g := NewWithT(t)
err := tt.cluster.ValidateCreate()
g.Expect(err).To(MatchError(ContainSubstring("control plane node count must be positive")))
})
}
}
func TestClusterValidateUpdateInvalidManagementCluster(t *testing.T) {
features.ClearCache()
tests := []struct {
name string
featureGateEnabled bool
clusterNew *v1alpha1.Cluster
}{
{
name: "Paused self-managed cluster, feature gate off",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Unpaused self-managed cluster, feature gate off",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
}),
featureGateEnabled: false,
},
{
name: "Paused self-managed cluster, feature gate on",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
}),
featureGateEnabled: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
features.ClearCache()
if tt.featureGateEnabled {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
}
clusterOld := baseCluster()
clusterOld.SetSelfManaged()
tt.clusterNew.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "md-0",
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}}
g := NewWithT(t)
err := tt.clusterNew.ValidateUpdate(clusterOld)
g.Expect(err).To(MatchError(ContainSubstring("worker node count must be >= 0")))
})
}
}
func TestClusterValidateUpdateInvalidWorkloadCluster(t *testing.T) {
features.ClearCache()
tests := []struct {
name string
featureGateEnabled bool
clusterNew *v1alpha1.Cluster
}{
{
name: "Paused workload cluster, feature gate off",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Unpaused workload cluster, feature gate off",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
}),
featureGateEnabled: false,
},
{
name: "Paused workload cluster, feature gate on",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: true,
},
{
name: "Unpaused workload cluster, feature gate on",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
}),
featureGateEnabled: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
features.ClearCache()
if tt.featureGateEnabled {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
}
clusterOld := baseCluster()
clusterOld.SetManagedBy("my-management-cluster")
// Invalid control plane configuration
tt.clusterNew.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}
g := NewWithT(t)
err := tt.clusterNew.ValidateUpdate(clusterOld)
g.Expect(err).To(MatchError(ContainSubstring("control plane node count must be positive")))
})
}
}
func TestClusterValidateCreateValidCluster(t *testing.T) {
tests := []struct {
name string
featureGateEnabled bool
cluster *v1alpha1.Cluster
}{
{
name: "Paused self-managed cluster, feature gate off",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Paused workload cluster, feature gate off",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Paused self-managed cluster, feature gate on",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
}),
featureGateEnabled: true,
},
{
name: "Paused workload cluster, feature gate on",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: true,
},
{
name: "Unpaused workload cluster, feature gate on",
cluster: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
}),
featureGateEnabled: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
features.ClearCache()
if tt.featureGateEnabled {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
}
g := NewWithT(t)
g.Expect(tt.cluster.ValidateCreate()).To(Succeed())
})
}
}
func TestClusterValidateUpdateValidManagementCluster(t *testing.T) {
features.ClearCache()
tests := []struct {
name string
featureGateEnabled bool
oldCluster *v1alpha1.Cluster
updateCluster clusterOpt
}{
{
name: "Paused self-managed cluster, feature gate off",
oldCluster: baseCluster(),
updateCluster: func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
},
featureGateEnabled: false,
},
{
name: "Unpaused self-managed cluster, feature gate off",
oldCluster: baseCluster(),
updateCluster: func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
},
featureGateEnabled: false,
},
{
name: "Paused self-managed cluster, feature gate on",
oldCluster: baseCluster(),
updateCluster: func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
c.PauseReconcile()
},
featureGateEnabled: true,
},
{
name: "Unpaused self-managed cluster, feature gate on, no changes",
oldCluster: baseCluster(
func(c *v1alpha1.Cluster) {
c.SetSelfManaged()
},
),
updateCluster: func(c *v1alpha1.Cluster) {},
featureGateEnabled: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
features.ClearCache()
if tt.featureGateEnabled {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
}
tt.oldCluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(4),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}}
newCluster := tt.oldCluster.DeepCopy()
tt.updateCluster(newCluster)
g := NewWithT(t)
err := newCluster.ValidateUpdate(tt.oldCluster)
g.Expect(err).To(Succeed())
})
}
}
func TestClusterValidateUpdateValidWorkloadCluster(t *testing.T) {
features.ClearCache()
tests := []struct {
name string
featureGateEnabled bool
clusterNew *v1alpha1.Cluster
}{
{
name: "Paused workload cluster, feature gate off",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: false,
},
{
name: "Unpaused workload cluster, feature gate off",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
}),
featureGateEnabled: false,
},
{
name: "Paused workload cluster, feature gate on",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
c.PauseReconcile()
}),
featureGateEnabled: true,
},
{
name: "Unpaused workload cluster, feature gate on",
clusterNew: newCluster(func(c *v1alpha1.Cluster) {
c.SetManagedBy("my-management-cluster")
}),
featureGateEnabled: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
features.ClearCache()
if tt.featureGateEnabled {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
}
clusterOld := baseCluster()
clusterOld.SetManagedBy("my-management-cluster")
tt.clusterNew.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(4),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}}
g := NewWithT(t)
err := tt.clusterNew.ValidateUpdate(clusterOld)
g.Expect(err).To(Succeed())
})
}
}
func TestClusterValidateUpdateInvalidRequest(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.SetSelfManaged()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "false")
cNew := cOld.DeepCopy()
cNew.Spec.ControlPlaneConfiguration.Count = cNew.Spec.ControlPlaneConfiguration.Count + 1
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateRollingAndScalingTinkerbellRequest(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.IsManaged()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
cOld.Spec.KubernetesVersion = "1.22"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.23"
cNew.Spec.ControlPlaneConfiguration.Count = cNew.Spec.ControlPlaneConfiguration.Count + 1
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("cannot perform scale up or down during rolling upgrades. Previous control plane node count")))
}
func TestClusterValidateUpdateAddWNConfig(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.IsManaged()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
cOld.Spec.KubernetesVersion = "1.22"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.23"
addWNC := v1alpha1.WorkerNodeGroupConfiguration{
Name: "md-1",
Count: ptr.Int(1),
}
cNew.Spec.WorkerNodeGroupConfigurations = append(cNew.Spec.WorkerNodeGroupConfigurations, addWNC)
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("cannot perform scale up or down during rolling upgrades. Please remove the new worker node group")))
}
func TestClusterValidateUpdateAddWNCount(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.IsManaged()
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
cOld.Spec.KubernetesVersion = "1.22"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.23"
cNew.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("cannot perform scale up or down during rolling upgrades. Previous worker node count")))
}
func TestClusterValidateUpdateRollingTinkerbellRequest(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "test"
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
cOld.Spec.KubernetesVersion = "1.22"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.23"
g := NewWithT(t)
g.Expect(cNew.ValidateUpdate(cOld)).To(Succeed())
}
func TestClusterValidateUpdateLabelTaintsCPTinkerbellRequest(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "test"
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
nodeLabels := map[string]string{"label1": "foo", "label2": "bar"}
var cpTaints []v1.Taint
cpTaints = append(cpTaints, v1.Taint{Key: "key1", Value: "val1", Effect: "NoSchedule", TimeAdded: nil})
cOld.Spec.ControlPlaneConfiguration.Labels = nodeLabels
cOld.Spec.ControlPlaneConfiguration.Taints = cpTaints
cNew := cOld.DeepCopy()
cNew.Spec.ControlPlaneConfiguration.Labels = map[string]string{}
cNew.Spec.ControlPlaneConfiguration.Taints = []v1.Taint{}
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.labels: Forbidden: field is immutable")))
g.Expect(err).To(MatchError(ContainSubstring("spec.ControlPlaneConfiguration.taints: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateLabelTaintsWNTinkerbellRequest(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "test"
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
nodeLabels := map[string]string{"label1": "foo", "label2": "bar"}
var wnTaints []v1.Taint
wnTaints = append(wnTaints, v1.Taint{Key: "key1", Value: "val1", Effect: "NoSchedule", TimeAdded: nil})
cOld.Spec.WorkerNodeGroupConfigurations[0].Labels = nodeLabels
cOld.Spec.WorkerNodeGroupConfigurations[0].Taints = wnTaints
cNew := cOld.DeepCopy()
cNew.Spec.WorkerNodeGroupConfigurations[0].Labels = map[string]string{}
cNew.Spec.WorkerNodeGroupConfigurations[0].Taints = []v1.Taint{}
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("spec.WorkerNodeConfiguration.labels: Forbidden: field is immutable")))
g.Expect(err).To(MatchError(ContainSubstring("spec.WorkerNodeConfiguration.taints: Forbidden: field is immutable")))
}
func TestClusterValidateUpdateLabelTaintsMultiWNTinkerbellRequest(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "test"
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
cOld.Spec.DatacenterRef.Kind = v1alpha1.TinkerbellDatacenterKind
nodeLabels := map[string]string{"label1": "foo", "label2": "bar"}
nodeLabels2 := map[string]string{"label3": "foo", "label4": "bar"}
cOld.Spec.WorkerNodeGroupConfigurations[0].Labels = nodeLabels
cOld.Spec.WorkerNodeGroupConfigurations[0].Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
cOld.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Kind = v1alpha1.TinkerbellMachineConfigKind
cOld.Spec.WorkerNodeGroupConfigurations = append(cOld.Spec.WorkerNodeGroupConfigurations,
v1alpha1.WorkerNodeGroupConfiguration{
Name: "md-1",
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.TinkerbellMachineConfigKind,
Name: "eksa-unit-test",
},
})
cOld.Spec.WorkerNodeGroupConfigurations[1].Labels = nodeLabels2
cOld.Spec.WorkerNodeGroupConfigurations[1].Taints = []v1.Taint{}
cNew := cOld.DeepCopy()
cNew.Spec.WorkerNodeGroupConfigurations[0].Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule", TimeAdded: nil}}
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(BeNil())
}
func TestClusterValidateUpdateSkipUpgradeImmutability(t *testing.T) {
tests := []struct {
Name string
Old *v1alpha1.Cluster
New *v1alpha1.Cluster
Error bool
}{
{
Name: "NilToFalse",
Old: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = nil
}),
New: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(false)
}),
},
{
Name: "FalseToNil",
Old: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(false)
}),
New: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = nil
}),
},
{
Name: "NilToTrue",
Old: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = nil
}),
New: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(true)
}),
},
{
Name: "FalseToTrue",
Old: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(false)
}),
New: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(true)
}),
},
{
Name: "TrueToNil",
Old: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(true)
}),
New: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = nil
}),
Error: true,
},
{
Name: "TrueToFalse",
Old: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(true)
}),
New: baseCluster(func(c *v1alpha1.Cluster) {
c.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(false)
}),
Error: true,
},
}
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
g := NewWithT(t)
err := tc.New.ValidateUpdate(tc.Old)
if !tc.Error {
g.Expect(err).To(Succeed())
} else {
g.Expect(err).To(MatchError(ContainSubstring(
"spec.clusterNetwork.cniConfig.cilium.skipUpgrade: Forbidden: cannot toggle " +
"off skipUpgrade once enabled",
)))
}
})
}
}
func TestClusterValidateUpdateVersionSkew(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "mgmt2"
cOld.Spec.KubernetesVersion = "1.22"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.24"
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("only +1 minor version skew is supported")))
}
func TestClusterValidateUpdateVersionSkewDecrement(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "mgmt2"
cOld.Spec.KubernetesVersion = "1.24"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.23"
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("kubernetes version downgrade is not supported")))
}
func TestClusterValidateUpdateVersionInvalidNew(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "mgmt2"
cOld.Spec.KubernetesVersion = "1.24"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "test"
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("parsing comparison version: could not parse \"test\" as version")))
}
func TestClusterValidateUpdateVersionInvalidOld(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "mgmt2"
cOld.Spec.KubernetesVersion = "test"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.24"
g := NewWithT(t)
err := cNew.ValidateUpdate(cOld)
g.Expect(err).To(MatchError(ContainSubstring("parsing cluster version: could not parse \"test\" as version")))
}
func TestClusterValidateUpdateVersionMinorVersionBump(t *testing.T) {
features.ClearCache()
cOld := baseCluster()
cOld.Spec.ManagementCluster.Name = "mgmt2"
cOld.Spec.KubernetesVersion = "1.24"
cNew := cOld.DeepCopy()
cNew.Spec.KubernetesVersion = "1.25"
g := NewWithT(t)
g.Expect(cNew.ValidateUpdate(cOld)).To(Succeed())
}
func newCluster(opts ...func(*v1alpha1.Cluster)) *v1alpha1.Cluster {
c := baseCluster()
for _, o := range opts {
o(c)
}
return c
}
type clusterOpt func(c *v1alpha1.Cluster)
func baseCluster(opts ...clusterOpt) *v1alpha1.Cluster {
c := &v1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.ClusterKind,
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "mgmt",
},
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube121,
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
BundlesRef: &v1alpha1.BundlesRef{
Name: "bundles-1",
Namespace: constants.EksaSystemNamespace,
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
ClusterNetwork: v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}},
Pods: v1alpha1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: v1alpha1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "eksa-unit-test",
},
},
}
for _, opt := range opts {
opt(c)
}
return c
}
| 2,020 |
eks-anywhere | aws | Go | package v1alpha1
// Conditions, condition reasons, and messages for the Cluster object.
const (
// ReadyCondition reports a summary of other conditions, indicating an overall operational
// state of the cluster: all control plane and worker nodes are the right version,
// all nodes are ready, not including old nodes.
ReadyCondition ConditionType = "Ready"
// OutdatedInformationReason reports the system is waiting for stale cluster information to be refreshed.
OutdatedInformationReason = "OutdatedInformation"
// ControlPlaneReadyCondition reports the status on the control plane nodes, indicating all those control plane
// nodes are the right version and are ready, not including the old nodes.
ControlPlaneReadyCondition ConditionType = "ControlPlaneReady"
// ControlPlaneInitializedCondition reports that the first control plane instance has been initialized
// and so the control plane is available and an API server instance is ready for processing requests.
ControlPlaneInitializedCondition ConditionType = "ControlPlaneInitialized"
// ControlPlaneInitializationInProgressReason reports that the control plane initilization is in progress.
ControlPlaneInitializationInProgressReason = "ControlPlaneInitializationInProgress"
// ControlPlaneNotReadyReason reports that the control plane is not ready.
ControlPlaneNotReadyReason = "ControlPlaneNotReady"
// ControlPlaneNotInitializedReason reports that the control plane is not initialized.
ControlPlaneNotInitializedReason = "ControlPlaneNotInitialized"
// WorkersReadyConditon reports the status on the worker nodes, indicating all those worker nodes
// are the right version and are ready, not including the old nodes.
WorkersReadyConditon ConditionType = "WorkersReady"
)
const (
// NodesNotReadyReason reports the Cluster has some nodes that are not ready.
NodesNotReadyReason = "NodesNotReady"
// ScalingUpReason reports the Cluster is increasing the number of replicas for a set of nodes.
ScalingUpReason = "ScalingUp"
// ScalingDownReason reports the Cluster is decreasing the number of replicas for a set of nodes.
ScalingDownReason = "ScalingDown"
// RollingUpgradeInProgress reports the Cluster is executing a rolling upgrading to align the nodes to
// a new desired machine spec.
RollingUpgradeInProgress = "RollingUpgradeInProgress"
)
const (
// DefaultCNIConfiguredCondition reports the default cni cluster has been configured successfully.
DefaultCNIConfiguredCondition ConditionType = "DefaultCNIConfigured"
// DefaultCNIUpgradeInProgressReason used when cluster is upgrading the default CNI.
DefaultCNIUpgradeInProgressReason = "DefaultCNIUpgradeInProgressReason"
// SkipUpgradesForDefaultCNIConfiguredReason used to indicate the custer has been configured to skip
// upgrades for the default cni. The default cni may still be installed, for example to successfully
// create a cluster.
SkipUpgradesForDefaultCNIConfiguredReason = "SkipUpgradesForDefaultCNIConfigured"
)
| 62 |
eks-anywhere | aws | Go | package v1alpha1
import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
type (
// ConditionType is an alias for clusterv1.ConditionType.
ConditionType = clusterv1.ConditionType
// Condition is an alias for clusterv1.Condition.
Condition = clusterv1.Condition
)
| 13 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const DockerDatacenterKind = "DockerDatacenterConfig"
// Used for generating yaml for generate clusterconfig command.
func NewDockerDatacenterConfigGenerate(clusterName string) *DockerDatacenterConfigGenerate {
return &DockerDatacenterConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: DockerDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: DockerDatacenterConfigSpec{},
}
}
func (c *DockerDatacenterConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *DockerDatacenterConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *DockerDatacenterConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
func GetDockerDatacenterConfig(fileName string) (*DockerDatacenterConfig, error) {
var clusterConfig DockerDatacenterConfig
err := ParseClusterConfig(fileName, &clusterConfig)
if err != nil {
return nil, err
}
return &clusterConfig, nil
}
| 43 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestGetDockerDatacenterConfig(t *testing.T) {
type args struct {
fileName string
}
tests := []struct {
name string
args args
want *v1alpha1.DockerDatacenterConfig
wantErr bool
}{
{
name: "Good Docker cluster config parse",
args: args{
fileName: "testdata/cluster_docker.yaml",
},
wantErr: false,
want: &v1alpha1.DockerDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.DockerDatacenterKind,
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
},
},
{
name: "Non existent Docker file",
args: args{
fileName: "testdata/cluster_docker_nonexistent.yaml",
},
wantErr: true,
want: nil,
},
{
name: "Bad Docker cluster config",
args: args{
fileName: "testdata/cluster_vsphere.yaml",
},
wantErr: true,
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := v1alpha1.GetDockerDatacenterConfig(tt.args.fileName)
if (err != nil) != tt.wantErr {
t.Errorf("GetDockerDatacenterConfig() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetDockerDatacenterConfig() got = %v, want %v", got, tt.want)
}
})
}
}
| 68 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// DockerDatacenterConfigSpec defines the desired state of DockerDatacenterConfig.
type DockerDatacenterConfigSpec struct { // Important: Run "make generate" to regenerate code after modifying this file
// Foo is an example field of DockerDatacenterConfig. Edit DockerDatacenter_types.go to remove/update
// Foo string `json:"foo,omitempty"`
}
// DockerDatacenterConfigStatus defines the observed state of DockerDatacenterConfig.
type DockerDatacenterConfigStatus struct { // Important: Run "make generate" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// DockerDatacenterConfig is the Schema for the DockerDatacenterConfigs API.
type DockerDatacenterConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DockerDatacenterConfigSpec `json:"spec,omitempty"`
Status DockerDatacenterConfigStatus `json:"status,omitempty"`
}
func (d *DockerDatacenterConfig) Kind() string {
return d.TypeMeta.Kind
}
func (d *DockerDatacenterConfig) ExpectedKind() string {
return DockerDatacenterKind
}
func (d *DockerDatacenterConfig) PauseReconcile() {
if d.Annotations == nil {
d.Annotations = map[string]string{}
}
d.Annotations[pausedAnnotation] = "true"
}
func (d *DockerDatacenterConfig) ClearPauseAnnotation() {
if d.Annotations != nil {
delete(d.Annotations, pausedAnnotation)
}
}
func (d *DockerDatacenterConfig) ConvertConfigToConfigGenerateStruct() *DockerDatacenterConfigGenerate {
namespace := defaultEksaNamespace
if d.Namespace != "" {
namespace = d.Namespace
}
config := &DockerDatacenterConfigGenerate{
TypeMeta: d.TypeMeta,
ObjectMeta: ObjectMeta{
Name: d.Name,
Annotations: d.Annotations,
Namespace: namespace,
},
Spec: d.Spec,
}
return config
}
func (d *DockerDatacenterConfig) Marshallable() Marshallable {
return d.ConvertConfigToConfigGenerateStruct()
}
func (d *DockerDatacenterConfig) Validate() error {
return nil
}
// +kubebuilder:object:generate=false
// Same as DockerDatacenterConfig except stripped down for generation of yaml file during generate clusterconfig.
type DockerDatacenterConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec DockerDatacenterConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// DockerDatacenterConfigList contains a list of DockerDatacenterConfig.
type DockerDatacenterConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DockerDatacenterConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&DockerDatacenterConfig{}, &DockerDatacenterConfigList{})
}
| 100 |
eks-anywhere | aws | Go | package v1alpha1
import (
"errors"
"fmt"
"net/url"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
FluxConfigKind = "FluxConfig"
RsaAlgorithm = "rsa"
EcdsaAlgorithm = "ecdsa"
Ed25519Algorithm = "ed25519"
)
func validateFluxConfig(config *FluxConfig) error {
if config.Spec.Git != nil && config.Spec.Github != nil {
return errors.New("must specify only one provider")
}
if config.Spec.Git == nil && config.Spec.Github == nil {
return errors.New("must specify a provider. Valid options are git and github")
}
if config.Spec.Github != nil {
err := validateGithubProviderConfig(*config.Spec.Github)
if err != nil {
return err
}
}
if config.Spec.Git != nil {
err := validateGitProviderConfig(*config.Spec.Git)
if err != nil {
return err
}
}
if len(config.Spec.Branch) > 0 {
err := validateGitBranchName(config.Spec.Branch)
if err != nil {
return err
}
}
return nil
}
func validateGitProviderConfig(gitProviderConfig GitProviderConfig) error {
if len(gitProviderConfig.RepositoryUrl) <= 0 {
return errors.New("'repositoryUrl' is not set or empty in gitProviderConfig; repositoryUrl is a required field")
}
if len(gitProviderConfig.SshKeyAlgorithm) > 0 {
if err := validateSshKeyAlgorithm(gitProviderConfig.SshKeyAlgorithm); err != nil {
return err
}
} else {
logger.Info("Warning: 'sshKeyAlgorithm' is not set, defaulting to 'ecdsa'")
}
return validateRepositoryUrl(gitProviderConfig.RepositoryUrl)
}
func validateGithubProviderConfig(config GithubProviderConfig) error {
if len(config.Owner) <= 0 {
return errors.New("'owner' is not set or empty in githubProviderConfig; owner is a required field")
}
if len(config.Repository) <= 0 {
return errors.New("'repository' is not set or empty in githubProviderConfig; repository is a required field")
}
err := validateGitRepoName(config.Repository)
if err != nil {
return err
}
return nil
}
func validateRepositoryUrl(repositoryUrl string) error {
url, err := url.Parse(repositoryUrl)
if err != nil {
return fmt.Errorf("unable to parse repository url: %v", err)
}
if url.Scheme != "ssh" {
return fmt.Errorf("invalid repository url scheme: %v", url.Scheme)
}
return nil
}
func validateSshKeyAlgorithm(sshKeyAlgorithm string) error {
if sshKeyAlgorithm != RsaAlgorithm && sshKeyAlgorithm != EcdsaAlgorithm && sshKeyAlgorithm != Ed25519Algorithm {
return fmt.Errorf("'sshKeyAlgorithm' does not have a valid value in gitProviderConfig; sshKeyAlgorithm must be amongst %s, %s, %s", RsaAlgorithm, EcdsaAlgorithm, Ed25519Algorithm)
}
return nil
}
func setFluxConfigDefaults(flux *FluxConfig) {
if flux == nil {
return
}
c := &flux.Spec
if len(c.SystemNamespace) == 0 {
c.SystemNamespace = FluxDefaultNamespace
}
if len(c.Branch) == 0 {
c.Branch = FluxDefaultBranch
}
}
| 109 |
eks-anywhere | aws | Go | package v1alpha1
import (
"errors"
"fmt"
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
EksaGitPrivateKeyTokenEnv = "EKSA_GIT_PRIVATE_KEY"
EksaGitKnownHostsFileEnv = "EKSA_GIT_KNOWN_HOSTS"
)
func TestValidateFluxConfig(t *testing.T) {
tests := []struct {
testName string
fluxConfig *FluxConfig
wantErr bool
gitProvider bool
error error
}{
{
testName: "valid fluxconfig github",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-github",
Namespace: "default",
},
Spec: FluxConfigSpec{
Github: &GithubProviderConfig{
Owner: "janedoe",
Repository: "flux-fleet",
},
},
},
wantErr: false,
error: nil,
},
{
testName: "valid fluxconfig git",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-git",
Namespace: "default",
},
Spec: FluxConfigSpec{
Git: &GitProviderConfig{
RepositoryUrl: "ssh://[email protected]/username/repo.git",
},
},
},
gitProvider: true,
wantErr: false,
error: nil,
},
{
testName: "empty owner",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-github",
Namespace: "default",
},
Spec: FluxConfigSpec{
Github: &GithubProviderConfig{
Repository: "flux-fleet",
},
},
},
wantErr: true,
error: errors.New("'owner' is not set or empty in githubProviderConfig; owner is a required field"),
},
{
testName: "empty repo",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-github",
Namespace: "default",
},
Spec: FluxConfigSpec{
Github: &GithubProviderConfig{
Owner: "janedoe",
},
},
},
wantErr: true,
error: errors.New("'repository' is not set or empty in githubProviderConfig; repository is a required field"),
},
{
testName: "empty repo url",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-git",
Namespace: "default",
},
Spec: FluxConfigSpec{
Git: &GitProviderConfig{
RepositoryUrl: "",
},
},
},
wantErr: true,
error: errors.New("'repositoryUrl' is not set or empty in gitProviderConfig; repositoryUrl is a required field"),
},
{
testName: "invalid repo url",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-git",
Namespace: "default",
},
Spec: FluxConfigSpec{
Git: &GitProviderConfig{
RepositoryUrl: "http://[email protected]/username/repo.git",
},
},
},
wantErr: true,
gitProvider: true,
error: fmt.Errorf("invalid repository url scheme: %s", "http"),
},
{
testName: "invalid sshkey algo",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-git",
Namespace: "default",
},
Spec: FluxConfigSpec{
Git: &GitProviderConfig{
RepositoryUrl: "ssh://[email protected]/username/repo.git",
SshKeyAlgorithm: "invalid",
},
},
},
wantErr: true,
error: fmt.Errorf("'sshKeyAlgorithm' does not have a valid value in gitProviderConfig; sshKeyAlgorithm must be amongst %s, %s, %s", RsaAlgorithm, EcdsaAlgorithm, Ed25519Algorithm),
},
{
testName: "valid ssh key algo",
fluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-flux-git",
Namespace: "default",
},
Spec: FluxConfigSpec{
Git: &GitProviderConfig{
RepositoryUrl: "ssh://[email protected]/username/repo.git",
SshKeyAlgorithm: RsaAlgorithm,
},
},
},
wantErr: false,
gitProvider: true,
error: nil,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if tt.gitProvider {
t.Setenv(EksaGitPrivateKeyTokenEnv, "my/private/key")
t.Setenv(EksaGitKnownHostsFileEnv, "my/known/hosts")
}
err := tt.fluxConfig.Validate()
if (err != nil) != tt.wantErr {
t.Fatalf("FluxConfig.Validate() error = %v, wantErr %v", err, tt.wantErr)
}
if tt.error != nil {
if !reflect.DeepEqual(err, tt.error) {
t.Fatalf("GetAndValidateFluxConfig() = %#v, want %#v", err, tt.error)
}
}
})
}
}
| 211 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FluxConfigSpec defines the desired state of FluxConfig.
type FluxConfigSpec struct {
// SystemNamespace scope for this operation. Defaults to flux-system
SystemNamespace string `json:"systemNamespace,omitempty"`
// ClusterConfigPath relative to the repository root, when specified the cluster sync will be scoped to this path.
ClusterConfigPath string `json:"clusterConfigPath,omitempty"`
// Git branch. Defaults to main.
// +kubebuilder:default:="main"
Branch string `json:"branch,omitempty"`
// Used to specify Github provider to host the Git repo and host the git files
Github *GithubProviderConfig `json:"github,omitempty"`
// Used to specify Git provider that will be used to host the git files
Git *GitProviderConfig `json:"git,omitempty"`
}
type GithubProviderConfig struct {
// Owner is the user or organization name of the Git provider.
Owner string `json:"owner"`
// Repository name.
Repository string `json:"repository"`
// if true, the owner is assumed to be a Git user; otherwise an org.
Personal bool `json:"personal,omitempty"`
}
type GitProviderConfig struct {
// Repository URL for the repository to be used with flux. Can be either an SSH or HTTPS url.
RepositoryUrl string `json:"repositoryUrl"`
// SSH public key algorithm for the private key specified (rsa, ecdsa, ed25519) (default ecdsa)
SshKeyAlgorithm string `json:"sshKeyAlgorithm,omitempty"`
}
// FluxConfigStatus defines the observed state of FluxConfig.
type FluxConfigStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// FluxConfig is the Schema for the fluxconfigs API and defines the configurations of the Flux GitOps Toolkit and
// Git repository it links to.
type FluxConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec FluxConfigSpec `json:"spec,omitempty"`
Status FluxConfigStatus `json:"status,omitempty"`
}
// +kubebuilder:object:generate=false
// Same as FluxConfig except stripped down for generation of yaml file while writing to github repo when flux is enabled.
type FluxConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec FluxConfigSpec `json:"spec,omitempty"`
}
func (e *FluxConfigSpec) Equal(n *FluxConfigSpec) bool {
if e == n {
return true
}
if e == nil || n == nil {
return false
}
if e.SystemNamespace != n.SystemNamespace {
return false
}
if e.Branch != n.Branch {
return false
}
if e.ClusterConfigPath != n.ClusterConfigPath {
return false
}
return e.Git.Equal(n.Git) && e.Github.Equal(n.Github)
}
func (e *GithubProviderConfig) Equal(n *GithubProviderConfig) bool {
if e == n {
return true
}
if e == nil || n == nil {
return false
}
return *e == *n
}
func (e *GitProviderConfig) Equal(n *GitProviderConfig) bool {
if e == n {
return true
}
if e == nil || n == nil {
return false
}
return *e == *n
}
//+kubebuilder:object:root=true
// FluxConfigList contains a list of FluxConfig.
type FluxConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []FluxConfig `json:"items"`
}
func (c *FluxConfig) Kind() string {
return c.TypeMeta.Kind
}
func (c *FluxConfig) ExpectedKind() string {
return FluxConfigKind
}
func (c *FluxConfig) ConvertConfigToConfigGenerateStruct() *FluxConfigGenerate {
namespace := defaultEksaNamespace
if c.Namespace != "" {
namespace = c.Namespace
}
config := &FluxConfigGenerate{
TypeMeta: c.TypeMeta,
ObjectMeta: ObjectMeta{
Name: c.Name,
Annotations: c.Annotations,
Namespace: namespace,
},
Spec: c.Spec,
}
return config
}
func (c *FluxConfig) Validate() error {
return validateFluxConfig(c)
}
func (c *FluxConfig) SetDefaults() {
setFluxConfigDefaults(c)
}
func init() {
SchemeBuilder.Register(&FluxConfig{}, &FluxConfigList{})
}
| 169 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var fluxconfiglog = logf.Log.WithName("fluxconfig-resource")
func (r *FluxConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// Change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-fluxconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=fluxconfigs,verbs=create;update,versions=v1alpha1,name=validation.fluxconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &FluxConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *FluxConfig) ValidateCreate() error {
fluxconfiglog.Info("validate create", "name", r.Name)
if err := r.Validate(); err != nil {
return apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
field.ErrorList{field.Invalid(field.NewPath("spec"), r.Spec, err.Error())})
}
return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *FluxConfig) ValidateUpdate(old runtime.Object) error {
fluxconfiglog.Info("validate update", "name", r.Name)
oldFluxConfig, ok := old.(*FluxConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a FluxConfig but got a %T", old))
}
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableFluxFields(r, oldFluxConfig)...)
if err := r.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), r.Spec, err.Error()))
}
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(GroupVersion.WithKind(FluxConfigKind).GroupKind(), r.Name, allErrs)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *FluxConfig) ValidateDelete() error {
fluxconfiglog.Info("validate delete", "name", r.Name)
return nil
}
func validateImmutableFluxFields(new, old *FluxConfig) field.ErrorList {
var allErrs field.ErrorList
if !new.Spec.Equal(&old.Spec) {
allErrs = append(
allErrs,
field.Forbidden(field.NewPath(FluxConfigKind), "config is immutable"),
)
}
return allErrs
}
| 99 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestClusterValidateUpdateFluxRepoImmutable(t *testing.T) {
fOld := fluxConfig()
fOld.Spec.Github = &v1alpha1.GithubProviderConfig{
Repository: "oldRepo",
}
c := fOld.DeepCopy()
c.Spec.Github.Repository = "fancyNewRepo"
f := NewWithT(t)
f.Expect(c.ValidateUpdate(&fOld)).To(MatchError(ContainSubstring("Forbidden: config is immutable")))
}
func TestClusterValidateUpdateFluxRepoUrlImmutable(t *testing.T) {
fOld := fluxConfig()
fOld.Spec.Git = &v1alpha1.GitProviderConfig{
RepositoryUrl: "https://test.git/test",
}
c := fOld.DeepCopy()
c.Spec.Git.RepositoryUrl = "https://test.git/test2"
f := NewWithT(t)
f.Expect(c.ValidateUpdate(&fOld)).To(MatchError(ContainSubstring("Forbidden: config is immutable")))
}
func TestClusterValidateUpdateFluxSshKeyAlgoImmutable(t *testing.T) {
fOld := fluxConfig()
fOld.Spec.Git = &v1alpha1.GitProviderConfig{
RepositoryUrl: "https://test.git/test",
SshKeyAlgorithm: "rsa",
}
c := fOld.DeepCopy()
c.Spec.Git.SshKeyAlgorithm = "rsa2"
f := NewWithT(t)
f.Expect(c.ValidateUpdate(&fOld)).To(MatchError(ContainSubstring("Forbidden: config is immutable")))
}
func TestClusterValidateUpdateFluxBranchImmutable(t *testing.T) {
fOld := fluxConfig()
fOld.Spec.Branch = "oldMain"
c := fOld.DeepCopy()
c.Spec.Branch = "newMain"
f := NewWithT(t)
f.Expect(c.ValidateUpdate(&fOld)).To(MatchError(ContainSubstring("Forbidden: config is immutable")))
}
func TestClusterValidateUpdateFluxSubtractionImmutable(t *testing.T) {
fOld := fluxConfig()
fOld.Spec.Github = &v1alpha1.GithubProviderConfig{
Repository: "oldRepo",
}
c := fOld.DeepCopy()
c.Spec = v1alpha1.FluxConfigSpec{}
f := NewWithT(t)
f.Expect(c.ValidateUpdate(&fOld)).To(MatchError(ContainSubstring("Forbidden: config is immutable")))
}
func TestValidateCreateHasValidatedSpec(t *testing.T) {
fNew := fluxConfig()
fNew.Spec.Git = &v1alpha1.GitProviderConfig{}
fNew.Spec.Github = &v1alpha1.GithubProviderConfig{}
f := NewWithT(t)
err := fNew.ValidateCreate()
f.Expect(apierrors.IsInvalid(err)).To(BeTrue())
f.Expect(err).To(MatchError(ContainSubstring("must specify only one provider")))
}
func TestValidateUpdateHasValidatedSpec(t *testing.T) {
fOld := fluxConfig()
fOld.Spec.Github = &v1alpha1.GithubProviderConfig{
Repository: "oldRepo",
}
c := fOld.DeepCopy()
c.Spec.Git = &v1alpha1.GitProviderConfig{}
f := NewWithT(t)
err := c.ValidateUpdate(&fOld)
f.Expect(apierrors.IsInvalid(err)).To(BeTrue())
f.Expect(err).To(MatchError(ContainSubstring("must specify only one provider")))
}
func fluxConfig() v1alpha1.FluxConfig {
return v1alpha1.FluxConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 1)},
Spec: v1alpha1.FluxConfigSpec{},
Status: v1alpha1.FluxConfigStatus{},
}
}
| 106 |
eks-anywhere | aws | Go | package v1alpha1
import (
"errors"
"fmt"
"regexp"
)
const (
GitOpsConfigKind = "GitOpsConfig"
FluxDefaultNamespace = "flux-system"
FluxDefaultBranch = "main"
)
func validateGitOpsConfig(config *GitOpsConfig) error {
if config == nil {
return errors.New("gitOpsRef is specified but GitOpsConfig is not specified")
}
flux := config.Spec.Flux
if len(flux.Github.Owner) <= 0 {
return errors.New("'owner' is not set or empty in gitOps.flux; owner is a required field")
}
if len(flux.Github.Repository) <= 0 {
return errors.New("'repository' is not set or empty in gitOps.flux; repository is a required field")
}
err := validateGitRepoName(flux.Github.Repository)
if err != nil {
return err
}
if len(flux.Github.Branch) > 0 {
err := validateGitBranchName(config.Spec.Flux.Github.Branch)
if err != nil {
return err
}
}
return nil
}
func validateGitBranchName(branchName string) error {
allowedGitBranchNameRegex := regexp.MustCompile(`^([0-9A-Za-z\_\+,]+)\.?\/?([0-9A-Za-z\-\_\+,]+)$`)
if !allowedGitBranchNameRegex.MatchString(branchName) {
return fmt.Errorf("%s is not a valid git branch name, please check with this documentation https://git-scm.com/docs/git-check-ref-format for valid git branch names", branchName)
}
return nil
}
func validateGitRepoName(repoName string) error {
allowedGitRepoName := regexp.MustCompile(`^([0-9A-Za-z-_.]+)$`)
if !allowedGitRepoName.MatchString(repoName) {
return fmt.Errorf("%s is not a valid git repository name, name can contain only letters, digits, '_', '-' and '.'", repoName)
}
return nil
}
func setGitOpsConfigDefaults(gitops *GitOpsConfig) {
if gitops == nil {
return
}
c := &gitops.Spec.Flux
if len(c.Github.FluxSystemNamespace) == 0 {
c.Github.FluxSystemNamespace = FluxDefaultNamespace
}
if len(c.Github.Branch) == 0 {
c.Github.Branch = FluxDefaultBranch
}
}
| 73 |
eks-anywhere | aws | Go | package v1alpha1
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestValidateGitOpsConfig(t *testing.T) {
tests := []struct {
testName string
gitOpsConfig *GitOpsConfig
wantErr bool
}{
{
testName: "valid gitopsconfig",
gitOpsConfig: &GitOpsConfig{
TypeMeta: metav1.TypeMeta{
Kind: "GitOpsConfig",
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-gitops",
Namespace: "default",
},
Spec: GitOpsConfigSpec{
Flux: Flux{
Github: Github{
Owner: "janedoe",
Repository: "flux-fleet",
},
},
},
},
wantErr: false,
},
{
testName: "empty owner",
gitOpsConfig: &GitOpsConfig{
TypeMeta: metav1.TypeMeta{
Kind: "GitOpsConfig",
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-gitops",
Namespace: "default",
},
Spec: GitOpsConfigSpec{
Flux: Flux{
Github: Github{
Repository: "flux-fleet",
},
},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
err := tt.gitOpsConfig.Validate()
if (err != nil) != tt.wantErr {
t.Fatalf("GitOpsConfig.Validate() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestConvertGitOpsConfigToFluxConfig(t *testing.T) {
tests := []struct {
testName string
givenGitOpsConfig *GitOpsConfig
wantFluxConfig *FluxConfig
clusterConfig *Cluster
}{
{
testName: "Convert GitOps Config to FluxConfig",
givenGitOpsConfig: &GitOpsConfig{
TypeMeta: metav1.TypeMeta{
Kind: "GitOpsConfig",
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-gitops",
Namespace: "default",
},
Spec: GitOpsConfigSpec{
Flux: Flux{
Github: Github{
Owner: "janedoe",
Repository: "flux-fleet",
FluxSystemNamespace: "flux-system-test",
Branch: "test-branch",
Personal: false,
ClusterConfigPath: "test-config-path",
},
},
},
},
wantFluxConfig: &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-gitops",
Namespace: "default",
},
Spec: FluxConfigSpec{
SystemNamespace: "flux-system-test",
ClusterConfigPath: "test-config-path",
Branch: "test-branch",
Github: &GithubProviderConfig{
Owner: "janedoe",
Repository: "flux-fleet",
Personal: false,
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
convertedGitOps := tt.givenGitOpsConfig.ConvertToFluxConfig()
if !reflect.DeepEqual(convertedGitOps, tt.wantFluxConfig) {
t.Fatalf("ConvertToFluxConfig() = %#v, want %#v", convertedGitOps, tt.wantFluxConfig)
}
})
}
}
| 134 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GitOps defines the configurations of GitOps Toolkit and Git repository it links to.
type GitOpsConfigSpec struct {
Flux Flux `json:"flux,omitempty"`
}
// Flux defines the Git repository options for Flux v2.
type Flux struct {
// github is the name of the Git Provider to host the Git repo.
Github Github `json:"github,omitempty"`
}
type Github struct {
// Owner is the user or organization name of the Git provider.
Owner string `json:"owner"`
// Repository name.
Repository string `json:"repository"`
// FluxSystemNamespace scope for this operation. Defaults to flux-system.
FluxSystemNamespace string `json:"fluxSystemNamespace,omitempty"`
// Git branch. Defaults to main.
// +kubebuilder:default:="main"
Branch string `json:"branch,omitempty"`
// ClusterConfigPath relative to the repository root, when specified the cluster sync will be scoped to this path.
ClusterConfigPath string `json:"clusterConfigPath,omitempty"`
// if true, the owner is assumed to be a Git user; otherwise an org.
Personal bool `json:"personal,omitempty"`
}
// GitOpsConfigStatus defines the observed state of GitOpsConfig.
type GitOpsConfigStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
type GitOpsConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec GitOpsConfigSpec `json:"spec,omitempty"`
Status GitOpsConfigStatus `json:"status,omitempty"`
}
// +kubebuilder:object:generate=false
// Same as GitOpsConfig except stripped down for generation of yaml file while writing to github repo when flux is enabled.
type GitOpsConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec GitOpsConfigSpec `json:"spec,omitempty"`
}
func (e *GitOpsConfigSpec) Equal(n *GitOpsConfigSpec) bool {
if e == n {
return true
}
if e == nil || n == nil {
return false
}
return e.Flux == n.Flux
}
//+kubebuilder:object:root=true
// GitOpsConfigList contains a list of GitOpsConfig.
type GitOpsConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []GitOpsConfig `json:"items"`
}
func (c *GitOpsConfig) Kind() string {
return c.TypeMeta.Kind
}
func (c *GitOpsConfig) ExpectedKind() string {
return GitOpsConfigKind
}
func (c *GitOpsConfig) ConvertToFluxConfig() *FluxConfig {
if c == nil {
return nil
}
config := &FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: FluxConfigKind,
APIVersion: c.APIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: FluxConfigSpec{
SystemNamespace: c.Spec.Flux.Github.FluxSystemNamespace,
Branch: c.Spec.Flux.Github.Branch,
ClusterConfigPath: c.Spec.Flux.Github.ClusterConfigPath,
Github: &GithubProviderConfig{
Owner: c.Spec.Flux.Github.Owner,
Repository: c.Spec.Flux.Github.Repository,
Personal: c.Spec.Flux.Github.Personal,
},
},
}
return config
}
func (c *GitOpsConfig) ConvertConfigToConfigGenerateStruct() *GitOpsConfigGenerate {
namespace := defaultEksaNamespace
if c.Namespace != "" {
namespace = c.Namespace
}
config := &GitOpsConfigGenerate{
TypeMeta: c.TypeMeta,
ObjectMeta: ObjectMeta{
Name: c.Name,
Annotations: c.Annotations,
Namespace: namespace,
},
Spec: c.Spec,
}
return config
}
func (c *GitOpsConfig) Validate() error {
return validateGitOpsConfig(c)
}
func (c *GitOpsConfig) SetDefaults() {
setGitOpsConfigDefaults(c)
}
func init() {
SchemeBuilder.Register(&GitOpsConfig{}, &GitOpsConfigList{})
}
| 145 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var gitopsconfiglog = logf.Log.WithName("gitopsconfig-resource")
func (r *GitOpsConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// Change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-gitopsconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=gitopsconfigs,verbs=create;update,versions=v1alpha1,name=validation.gitopsconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &GitOpsConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *GitOpsConfig) ValidateCreate() error {
gitopsconfiglog.Info("validate create", "name", r.Name)
return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *GitOpsConfig) ValidateUpdate(old runtime.Object) error {
gitopsconfiglog.Info("validate update", "name", r.Name)
oldGitOpsConfig, ok := old.(*GitOpsConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a GitOpsConfig but got a %T", old))
}
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableGitOpsFields(r, oldGitOpsConfig)...)
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(GroupVersion.WithKind(GitOpsConfigKind).GroupKind(), r.Name, allErrs)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *GitOpsConfig) ValidateDelete() error {
gitopsconfiglog.Info("validate delete", "name", r.Name)
// TODO(user): fill in your validation logic upon object deletion.
return nil
}
func validateImmutableGitOpsFields(new, old *GitOpsConfig) field.ErrorList {
var allErrs field.ErrorList
if !new.Spec.Equal(&old.Spec) {
allErrs = append(
allErrs,
field.Forbidden(field.NewPath(GitOpsConfigKind), "config is immutable"),
)
}
return allErrs
}
| 75 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestClusterValidateUpdateGitOpsRepoImmutable(t *testing.T) {
gOld := gitOpsConfig()
gOld.Spec.Flux.Github.Repository = "oldRepo"
c := gOld.DeepCopy()
c.Spec.Flux.Github.Repository = "fancyNewRepo"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&gOld)).To(MatchError(ContainSubstring("GitOpsConfig: Forbidden: config is immutable")))
}
func TestClusterValidateUpdateGitOpsBranchImmutable(t *testing.T) {
gOld := gitOpsConfig()
gOld.Spec.Flux.Github.Branch = "oldMain"
c := gOld.DeepCopy()
c.Spec.Flux.Github.Repository = "newMain"
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&gOld)).To(MatchError(ContainSubstring("GitOpsConfig: Forbidden: config is immutable")))
}
func TestClusterValidateUpdateGitOpsSubtractionImmutable(t *testing.T) {
gOld := gitOpsConfig()
gOld.Spec.Flux.Github.Repository = "oldRepo"
c := gOld.DeepCopy()
c.Spec = v1alpha1.GitOpsConfigSpec{}
g := NewWithT(t)
g.Expect(c.ValidateUpdate(&gOld)).To(MatchError(ContainSubstring("GitOpsConfig: Forbidden: config is immutable")))
}
func gitOpsConfig() v1alpha1.GitOpsConfig {
return v1alpha1.GitOpsConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 1)},
Spec: v1alpha1.GitOpsConfigSpec{},
Status: v1alpha1.GitOpsConfigStatus{},
}
}
| 50 |
eks-anywhere | aws | Go | // Package v1alpha1 contains API Schema definitions for the anywhere v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=anywhere.eks.amazonaws.com
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "anywhere.eks.amazonaws.com", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
| 21 |
eks-anywhere | aws | Go | package v1alpha1
import (
"crypto/x509"
"encoding/pem"
"fmt"
"net"
"net/url"
"strings"
"github.com/pkg/errors"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)
func validateHostOSConfig(config *HostOSConfiguration, osFamily OSFamily) error {
if config == nil {
return nil
}
if err := validateNTPServers(config.NTPConfiguration); err != nil {
return err
}
for _, certBundle := range config.CertBundles {
if err := validateCertBundles(&certBundle, osFamily); err != nil {
return err
}
}
return validateBotterocketConfig(config.BottlerocketConfiguration, osFamily)
}
func validateNTPServers(config *NTPConfiguration) error {
if config == nil {
return nil
}
if len(config.Servers) == 0 {
return errors.New("NTPConfiguration.Servers can not be empty")
}
var invalidServers []string
for _, ntpServer := range config.Servers {
// ParseRequestURI expects a scheme but ntp servers generally don't have one
// Prepending a scheme here so it doesn't fail because of missing scheme
if u, err := url.ParseRequestURI(addNTPScheme(ntpServer)); err != nil || u.Scheme == "" || u.Host == "" {
invalidServers = append(invalidServers, ntpServer)
}
}
if len(invalidServers) != 0 {
return fmt.Errorf("ntp servers [%s] is not valid", strings.Join(invalidServers[:], ", "))
}
return nil
}
func addNTPScheme(server string) string {
if strings.Contains(server, "://") {
return server
}
return fmt.Sprintf("udp://%s", server)
}
func validateCertBundles(config *certBundle, osFamily OSFamily) error {
if config == nil {
return nil
}
if osFamily != Bottlerocket {
return fmt.Errorf("CertBundles can only be used with osFamily: \"%s\"", Bottlerocket)
}
if config.Name == "" {
return errors.New("certBundles name cannot be empty")
}
if err := validateTrustedCertBundle(config.Data); err != nil {
return err
}
return nil
}
func validateBotterocketConfig(config *BottlerocketConfiguration, osFamily OSFamily) error {
if config == nil {
return nil
}
if osFamily != Bottlerocket {
return fmt.Errorf("BottlerocketConfiguration can only be used with osFamily: \"%s\"", Bottlerocket)
}
if err := validateBottlerocketKubernetesConfig(config.Kubernetes); err != nil {
return err
}
if err := validateBottlerocketKernelConfiguration(config.Kernel); err != nil {
return err
}
return validateBottlerocketBootSettingsConfiguration(config.Boot)
}
func validateBottlerocketKubernetesConfig(config *v1beta1.BottlerocketKubernetesSettings) error {
if config == nil {
return nil
}
for _, val := range config.AllowedUnsafeSysctls {
if val == "" {
return errors.New("BottlerocketConfiguration.Kubernetes.AllowedUnsafeSysctls can not have an empty string (\"\")")
}
}
for _, ip := range config.ClusterDNSIPs {
if net.ParseIP(ip) == nil {
return fmt.Errorf("IP address [%s] in BottlerocketConfiguration.Kubernetes.ClusterDNSIPs is not a valid IP", ip)
}
}
if config.MaxPods < 0 {
return errors.New("BottlerocketConfiguration.Kubernetes.MaxPods can not be negative")
}
return nil
}
func validateBottlerocketKernelConfiguration(config *v1beta1.BottlerocketKernelSettings) error {
if config == nil {
return nil
}
for key := range config.SysctlSettings {
if key == "" {
return errors.New("sysctlSettings key cannot be empty")
}
}
return nil
}
func validateBottlerocketBootSettingsConfiguration(config *v1beta1.BottlerocketBootSettings) error {
if config == nil {
return nil
}
for key := range config.BootKernelParameters {
if key == "" {
return fmt.Errorf("bootKernelParameters key cannot be empty")
}
}
return nil
}
// validateTrustedCertBundle validates that the cert is valid.
func validateTrustedCertBundle(certBundle string) error {
var blocks []byte
rest := []byte(certBundle)
// cert bundles could contain more than one certificate
for {
var block *pem.Block
block, rest = pem.Decode(rest)
// no more PEM structed objects
if block == nil {
break
}
blocks = append(blocks, block.Bytes...)
if len(rest) == 0 {
break
}
}
if len(blocks) == 0 {
return fmt.Errorf("failed to parse certificate PEM")
}
_, err := x509.ParseCertificates(blocks)
if err != nil {
return fmt.Errorf("failed to parse certificate: %v", err)
}
return nil
}
| 183 |
eks-anywhere | aws | Go | package v1alpha1
import (
"testing"
. "github.com/onsi/gomega"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)
func TestValidateHostOSConfig(t *testing.T) {
tests := []struct {
name string
hostOSConfig *HostOSConfiguration
osFamily OSFamily
wantErr string
}{
{
name: "nil HostOSConfig",
hostOSConfig: nil,
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "empty HostOSConfig",
hostOSConfig: &HostOSConfiguration{},
wantErr: "",
},
{
name: "empty NTP servers",
hostOSConfig: &HostOSConfiguration{
NTPConfiguration: &NTPConfiguration{
Servers: []string{},
},
},
osFamily: Bottlerocket,
wantErr: "NTPConfiguration.Servers can not be empty",
},
{
name: "invalid NTP servers",
hostOSConfig: &HostOSConfiguration{
NTPConfiguration: &NTPConfiguration{
Servers: []string{
"time-a.eks-a.aws",
"not a valid ntp server",
"also invalid",
"udp://",
"time-b.eks-a.aws",
},
},
},
osFamily: Bottlerocket,
wantErr: "ntp servers [not a valid ntp server, also invalid, udp://] is not valid",
},
{
name: "valid NTP config",
hostOSConfig: &HostOSConfiguration{
NTPConfiguration: &NTPConfiguration{
Servers: []string{
"time-a.eks-a.aws",
"time-b.eks-a.aws",
"192.168.0.10",
"2610:20:6f15:15::26",
},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "empty Bottlerocket config",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "empty Bottlerocket.Kubernetes config",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kubernetes: &v1beta1.BottlerocketKubernetesSettings{},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "empty Bottlerocket.Kubernetes full valid config",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kubernetes: &v1beta1.BottlerocketKubernetesSettings{
AllowedUnsafeSysctls: []string{
"net.core.somaxconn",
"net.ipv4.ip_local_port_range",
},
ClusterDNSIPs: []string{
"1.2.3.4",
"5.6.7.8",
},
MaxPods: 100,
},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "invalid Bottlerocket.Kubernetes.AllowedUnsafeSysctls",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kubernetes: &v1beta1.BottlerocketKubernetesSettings{
AllowedUnsafeSysctls: []string{
"net.core.somaxconn",
"",
},
},
},
},
osFamily: Bottlerocket,
wantErr: "BottlerocketConfiguration.Kubernetes.AllowedUnsafeSysctls can not have an empty string (\"\")",
},
{
name: "invalid Bottlerocket.Kubernetes.ClusterDNSIPs",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kubernetes: &v1beta1.BottlerocketKubernetesSettings{
ClusterDNSIPs: []string{
"1.2.3.4",
"not a valid IP",
},
},
},
},
osFamily: Bottlerocket,
wantErr: "IP address [not a valid IP] in BottlerocketConfiguration.Kubernetes.ClusterDNSIPs is not a valid IP",
},
{
name: "invalid Bottlerocket.Kubernetes.MaxPods",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kubernetes: &v1beta1.BottlerocketKubernetesSettings{
MaxPods: -1,
},
},
},
osFamily: Bottlerocket,
wantErr: "BottlerocketConfiguration.Kubernetes.MaxPods can not be negative",
},
{
name: "Bottlerocket config with non-Bottlerocket OSFamily",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{},
},
osFamily: Ubuntu,
wantErr: "BottlerocketConfiguration can only be used with osFamily: \"bottlerocket\"",
},
{
name: "valid kernel config",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kernel: &v1beta1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"vm.max_map_count": "262144",
"fs.file-max": "65535",
"net.ipv4.tcp_mtu_probing": "1",
},
},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "invalid kernel key value",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kernel: &v1beta1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"": "262144",
},
},
},
},
osFamily: Bottlerocket,
wantErr: "sysctlSettings key cannot be empty",
},
{
name: "valid bootSettings config",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Boot: &v1beta1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"console": {
"tty0",
"ttyS0,115200n8",
},
},
},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "invalid bootSettings config",
hostOSConfig: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Boot: &v1beta1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"": {
"tty0",
"ttyS0,115200n8",
},
},
},
},
},
osFamily: Bottlerocket,
wantErr: "bootKernelParameters key cannot be empty",
},
{
name: "valid cert bundle",
hostOSConfig: &HostOSConfiguration{
CertBundles: []certBundle{
{
Name: "bundle1",
Data: `-----BEGIN CERTIFICATE-----
MIIFrTCCA5WgAwIBAgIUFXtDg6MEuAA0Ns1Ah2pfVKDC/nIwDQYJKoZIhvcNAQEN
BQAwZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMRAwDgYDVQQHDAdTZWF0dGxl
MQ0wCwYDVQQKDARFa3NhMRIwEAYDVQQLDAlQZXJzb25hbGwxFTATBgNVBAMMDDEw
LjgwLjE0OC41NjAeFw0yMzAyMTEwMDQ0MjRaFw0zMzAyMDgwMDQ0MjRaMGYxCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTENMAsGA1UE
CgwERWtzYTESMBAGA1UECwwJUGVyc29uYWxsMRUwEwYDVQQDDAwxMC44MC4xNDgu
NTYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDpVLHBdFr7fh+PRQnZ
PlyXbIDfxdKCNZaNfxOl5I2eKW5m+zennt7N+nM1/5oDcVoIzVVCFkSmztl5GNr7
zyKfqDb/q6wZSQTOreNALBEh6redLnzc6OkQYnlFFuLcTuWLqTHdMoEJozbW+9K+
Z3lxKU92FvsTDaZKCT8NWwnKoTeXhEZtOF0KnJmzdQUztL7mNjSn53qUv/6WHwBG
/8F7elkJYP98jhhYkpKgPSnpoDuay3zmQxsFXvh9+j9GztODoroZgkNBuooObsnE
CQEFJLGZ03XAkyaumzfjSD5Ma4QQZPy0VwV2NHL3ngec+wxUH7u+FqupWbQZtJP6
+I3jSGGmhI2G/NIJZD0jiytlR1YmoUYM5qHl/VvrcqKoMEIGgF1ktYup9NAdGzLA
AItBbjDY8Vl+TGMC0vDbrHtVReYOWfx7TBo5nPnBjC1yTtnoYIk8EkJaPY9AuI5V
/WJ866PrPo5dPw/EVu3kuVzG5VoYb+iqY/qENnDnNeP0rCYF/tgna8c6sBkJG2y9
vo8ZkLC22J1CTWNqpiLBg0B9Jn2WIGFKNlavj8RiLSjNMDDj0EzS8mxZqlE+gn+C
MsASCpOAtIKPlr+x+Bfd0xaG1HyzaJV/cFHW8p6+8UvE9Nl0SeTWd8RtG36zXxif
WBDWiUJ9clQSp0F4Yunxts3PFQIDAQABo1MwUTAdBgNVHQ4EFgQUT78n048MqxDw
DdtI+841JN+OQpEwHwYDVR0jBBgwFoAUT78n048MqxDwDdtI+841JN+OQpEwDwYD
VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOCAgEAI3TT8ykTsX4Ra8asa8gl
DqVsz8t5HjOpA2nfAmO9GETg+StEW0j2FGglu+S4OhMx4TN4rYAPY0C78+5GHQwu
UAKxAVXigLXG2TmgszJ1OIBXIRdMlyc3gVm9j5Odcrm0WSGmSe47K8IgEVwlGEoS
CEUfKbMlVzEaPlP7YUOtiElzYi8D4ht9JfmRPi7PjUjVrWw0My+XPrFA4siU8dfh
BbvN9ybHmgltFqeZEqEv2wA9l6EpY71sArWsw+k5OYb2tXiXBxXY9LJzNDdJsNW9
8EP5rFIPvtoZMlE9qAHqy1kkqxcjvhcD3SD8zxJKCDLCJnVPG01k2siwlA46jcTT
xFZhEttbFcASpURjxkvBndXCktN2myWwokNGlf1hosxk2lG5DcySwHAhjXLJV6r2
l7X/CZpR38n/FXAKwiMQAPFZsLRU/EWBPTDlD1zjQSH8weGEj8+e9tQuTm+QsYrb
aJW3puZ84fEgYu/QMjGTuJzd+ZswMcLLyyn4Sm9nvchE8SdEUiF6L0Lc8+qgwmJU
idxqPeX4DMweDcskpZDPbfI6jnNorvGiWaLAYEJ4ntc3SP/lvbwXXOJLvhnRP+Ov
zphcd/PRLS7VpAhWOVbulPjB8DkX0PmvgaCeiTDuajMxq6ve64v+dCwUcqqbamC2
OelAabtJKd8B2BUsR7JRIN8=
-----END CERTIFICATE-----`,
},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "valid nil certBunldes",
hostOSConfig: &HostOSConfiguration{
CertBundles: nil,
},
osFamily: Bottlerocket,
wantErr: "",
},
{
name: "invalid cert no data",
hostOSConfig: &HostOSConfiguration{
CertBundles: []certBundle{
{
Name: "bundle1",
Data: "",
},
},
},
osFamily: Bottlerocket,
wantErr: "failed to parse certificate PEM",
},
{
name: "invalid cert bundle no name",
hostOSConfig: &HostOSConfiguration{
CertBundles: []certBundle{
{
Name: "",
Data: "ABCDEF",
},
{
Name: "bundle2",
Data: "123456",
},
},
},
osFamily: Bottlerocket,
wantErr: "certBundles name cannot be empty",
},
{
name: "invalid cert bundle wrong data type",
hostOSConfig: &HostOSConfiguration{
CertBundles: []certBundle{
{
Name: "bundle1",
Data: "QUJDREVm",
},
},
},
osFamily: Bottlerocket,
wantErr: "failed to parse certificate PEM",
},
{
name: "invalid cert bundle wrong data type",
hostOSConfig: &HostOSConfiguration{
CertBundles: []certBundle{
{
Name: "bundle1",
Data: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBZzhBTUlJQ0NnS0NBZ0VBbFJ1Um5UaFVqVTgvcHJ3WXhidHkKV1BUOXBVUkkzbGJzS01pQjZGbi9WSE9LRTEzcDREOHhnT0NBRHBkUmFnZFQ2bjRldHI5YXR6REtVU3ZwTXRSMwpDUDVub05jOTdXaU5DZ2dCalZXaHM3c3pFZTh1Z3lxRjIzWHdwSFE2dVYxTEtINTBtOTJNYk9XZkN0alU5cC94CnFoTnBRUTFBWmhxTnk1R2V2YXA1azhYelJtalNsZE5BRlpNWTdZdjNHaStueUN3R3dwVnRCVXdodUx6Z05GSy8KeUR0dzJXY1dtVVU3TnVDOFE2TVd2UGVieFZ0Q2ZWcC9pUVU2cTYweXl0NmFHT0JraEFYMExwS0FFaEtpZGl4WQpuUDlQTlZCdnhndTNYWjRQMzZnWlY2K3VtbUtkQlZuYzNOcXdCTHU1K0NjZFJkdXNtSFBIZDVwSGY0LzM4WjMvCjZxVTJhL2ZQdld6Y2VWVEVnWjQ3UWpGTVRDVG1Dd050Mjljdmk3elplUXpqdHdRZ240aXBOOU5pYlJIL0F4L3EKVGJJekhmckoxeGEyUnRlV1NkRmp3dHhpOUMyMEhVa2pYU2VJNFlselFNSDBmUFg2S0NFN2FWZVBUT25CNjlJLwphOS9xOTZEaVhaYWp3bHBxM3dGY3RyczFvWHFCcDVEVnJDSWo4aFUyd05nQjdMdFExbUN0c1l6Ly9oZWFpMEs5ClBoRTRYNmhpRTBZbWVBWmpSMHVIbDhNLzVhVzl4Q29KNzIrMTJrS3BXQWEwU0ZSV0x5NkZlak5ZQ1lwa3VwVkoKeWVjTGsvNEwxVzBsNmpRUVpuV0VyWFpZZTBQTkZjbXdHWHkxUmVwODNrZkJSTktSeTV0dm9jYWxMbHdYTGRVawpBSVUrMkdLanlUM2lNdXpaeHhGeFBGTUNBd0VBQVE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
},
},
},
osFamily: Bottlerocket,
wantErr: "failed to parse certificate",
},
{
name: "more than one cert in one bundle",
hostOSConfig: &HostOSConfiguration{
CertBundles: []certBundle{
{
Name: "bundle1",
Data: `-----BEGIN CERTIFICATE-----
MIIFrTCCA5WgAwIBAgIUFXtDg6MEuAA0Ns1Ah2pfVKDC/nIwDQYJKoZIhvcNAQEN
BQAwZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMRAwDgYDVQQHDAdTZWF0dGxl
MQ0wCwYDVQQKDARFa3NhMRIwEAYDVQQLDAlQZXJzb25hbGwxFTATBgNVBAMMDDEw
LjgwLjE0OC41NjAeFw0yMzAyMTEwMDQ0MjRaFw0zMzAyMDgwMDQ0MjRaMGYxCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTENMAsGA1UE
CgwERWtzYTESMBAGA1UECwwJUGVyc29uYWxsMRUwEwYDVQQDDAwxMC44MC4xNDgu
NTYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDpVLHBdFr7fh+PRQnZ
PlyXbIDfxdKCNZaNfxOl5I2eKW5m+zennt7N+nM1/5oDcVoIzVVCFkSmztl5GNr7
zyKfqDb/q6wZSQTOreNALBEh6redLnzc6OkQYnlFFuLcTuWLqTHdMoEJozbW+9K+
Z3lxKU92FvsTDaZKCT8NWwnKoTeXhEZtOF0KnJmzdQUztL7mNjSn53qUv/6WHwBG
/8F7elkJYP98jhhYkpKgPSnpoDuay3zmQxsFXvh9+j9GztODoroZgkNBuooObsnE
CQEFJLGZ03XAkyaumzfjSD5Ma4QQZPy0VwV2NHL3ngec+wxUH7u+FqupWbQZtJP6
+I3jSGGmhI2G/NIJZD0jiytlR1YmoUYM5qHl/VvrcqKoMEIGgF1ktYup9NAdGzLA
AItBbjDY8Vl+TGMC0vDbrHtVReYOWfx7TBo5nPnBjC1yTtnoYIk8EkJaPY9AuI5V
/WJ866PrPo5dPw/EVu3kuVzG5VoYb+iqY/qENnDnNeP0rCYF/tgna8c6sBkJG2y9
vo8ZkLC22J1CTWNqpiLBg0B9Jn2WIGFKNlavj8RiLSjNMDDj0EzS8mxZqlE+gn+C
MsASCpOAtIKPlr+x+Bfd0xaG1HyzaJV/cFHW8p6+8UvE9Nl0SeTWd8RtG36zXxif
WBDWiUJ9clQSp0F4Yunxts3PFQIDAQABo1MwUTAdBgNVHQ4EFgQUT78n048MqxDw
DdtI+841JN+OQpEwHwYDVR0jBBgwFoAUT78n048MqxDwDdtI+841JN+OQpEwDwYD
VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOCAgEAI3TT8ykTsX4Ra8asa8gl
DqVsz8t5HjOpA2nfAmO9GETg+StEW0j2FGglu+S4OhMx4TN4rYAPY0C78+5GHQwu
UAKxAVXigLXG2TmgszJ1OIBXIRdMlyc3gVm9j5Odcrm0WSGmSe47K8IgEVwlGEoS
CEUfKbMlVzEaPlP7YUOtiElzYi8D4ht9JfmRPi7PjUjVrWw0My+XPrFA4siU8dfh
BbvN9ybHmgltFqeZEqEv2wA9l6EpY71sArWsw+k5OYb2tXiXBxXY9LJzNDdJsNW9
8EP5rFIPvtoZMlE9qAHqy1kkqxcjvhcD3SD8zxJKCDLCJnVPG01k2siwlA46jcTT
xFZhEttbFcASpURjxkvBndXCktN2myWwokNGlf1hosxk2lG5DcySwHAhjXLJV6r2
l7X/CZpR38n/FXAKwiMQAPFZsLRU/EWBPTDlD1zjQSH8weGEj8+e9tQuTm+QsYrb
aJW3puZ84fEgYu/QMjGTuJzd+ZswMcLLyyn4Sm9nvchE8SdEUiF6L0Lc8+qgwmJU
idxqPeX4DMweDcskpZDPbfI6jnNorvGiWaLAYEJ4ntc3SP/lvbwXXOJLvhnRP+Ov
zphcd/PRLS7VpAhWOVbulPjB8DkX0PmvgaCeiTDuajMxq6ve64v+dCwUcqqbamC2
OelAabtJKd8B2BUsR7JRIN8=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFrTCCA5WgAwIBAgIUFXtDg6MEuAA0Ns1Ah2pfVKDC/nIwDQYJKoZIhvcNAQEN
BQAwZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMRAwDgYDVQQHDAdTZWF0dGxl
MQ0wCwYDVQQKDARFa3NhMRIwEAYDVQQLDAlQZXJzb25hbGwxFTATBgNVBAMMDDEw
LjgwLjE0OC41NjAeFw0yMzAyMTEwMDQ0MjRaFw0zMzAyMDgwMDQ0MjRaMGYxCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTENMAsGA1UE
CgwERWtzYTESMBAGA1UECwwJUGVyc29uYWxsMRUwEwYDVQQDDAwxMC44MC4xNDgu
NTYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDpVLHBdFr7fh+PRQnZ
PlyXbIDfxdKCNZaNfxOl5I2eKW5m+zennt7N+nM1/5oDcVoIzVVCFkSmztl5GNr7
zyKfqDb/q6wZSQTOreNALBEh6redLnzc6OkQYnlFFuLcTuWLqTHdMoEJozbW+9K+
Z3lxKU92FvsTDaZKCT8NWwnKoTeXhEZtOF0KnJmzdQUztL7mNjSn53qUv/6WHwBG
/8F7elkJYP98jhhYkpKgPSnpoDuay3zmQxsFXvh9+j9GztODoroZgkNBuooObsnE
CQEFJLGZ03XAkyaumzfjSD5Ma4QQZPy0VwV2NHL3ngec+wxUH7u+FqupWbQZtJP6
+I3jSGGmhI2G/NIJZD0jiytlR1YmoUYM5qHl/VvrcqKoMEIGgF1ktYup9NAdGzLA
AItBbjDY8Vl+TGMC0vDbrHtVReYOWfx7TBo5nPnBjC1yTtnoYIk8EkJaPY9AuI5V
/WJ866PrPo5dPw/EVu3kuVzG5VoYb+iqY/qENnDnNeP0rCYF/tgna8c6sBkJG2y9
vo8ZkLC22J1CTWNqpiLBg0B9Jn2WIGFKNlavj8RiLSjNMDDj0EzS8mxZqlE+gn+C
MsASCpOAtIKPlr+x+Bfd0xaG1HyzaJV/cFHW8p6+8UvE9Nl0SeTWd8RtG36zXxif
WBDWiUJ9clQSp0F4Yunxts3PFQIDAQABo1MwUTAdBgNVHQ4EFgQUT78n048MqxDw
DdtI+841JN+OQpEwHwYDVR0jBBgwFoAUT78n048MqxDwDdtI+841JN+OQpEwDwYD
VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOCAgEAI3TT8ykTsX4Ra8asa8gl
DqVsz8t5HjOpA2nfAmO9GETg+StEW0j2FGglu+S4OhMx4TN4rYAPY0C78+5GHQwu
UAKxAVXigLXG2TmgszJ1OIBXIRdMlyc3gVm9j5Odcrm0WSGmSe47K8IgEVwlGEoS
CEUfKbMlVzEaPlP7YUOtiElzYi8D4ht9JfmRPi7PjUjVrWw0My+XPrFA4siU8dfh
BbvN9ybHmgltFqeZEqEv2wA9l6EpY71sArWsw+k5OYb2tXiXBxXY9LJzNDdJsNW9
8EP5rFIPvtoZMlE9qAHqy1kkqxcjvhcD3SD8zxJKCDLCJnVPG01k2siwlA46jcTT
xFZhEttbFcASpURjxkvBndXCktN2myWwokNGlf1hosxk2lG5DcySwHAhjXLJV6r2
l7X/CZpR38n/FXAKwiMQAPFZsLRU/EWBPTDlD1zjQSH8weGEj8+e9tQuTm+QsYrb
aJW3puZ84fEgYu/QMjGTuJzd+ZswMcLLyyn4Sm9nvchE8SdEUiF6L0Lc8+qgwmJU
idxqPeX4DMweDcskpZDPbfI6jnNorvGiWaLAYEJ4ntc3SP/lvbwXXOJLvhnRP+Ov
zphcd/PRLS7VpAhWOVbulPjB8DkX0PmvgaCeiTDuajMxq6ve64v+dCwUcqqbamC2
OelAabtJKd8B2BUsR7JRIN8=
-----END CERTIFICATE-----`,
},
},
},
osFamily: Bottlerocket,
wantErr: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := validateHostOSConfig(tt.hostOSConfig, tt.osFamily)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
| 423 |
eks-anywhere | aws | Go | package v1alpha1
import "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
// HostOSConfiguration defines the configuration settings on the host OS.
type HostOSConfiguration struct {
// +optional
NTPConfiguration *NTPConfiguration `json:"ntpConfiguration,omitempty"`
// +optional
BottlerocketConfiguration *BottlerocketConfiguration `json:"bottlerocketConfiguration,omitempty"`
// +optional
CertBundles []certBundle `json:"certBundles,omitempty"`
}
// NTPConfiguration defines the NTP configuration on the host OS.
type NTPConfiguration struct {
// Servers defines a list of NTP servers to be configured on the host OS.
Servers []string `json:"servers"`
}
// BottlerocketConfiguration defines the Bottlerocket configuration on the host OS.
// These settings only take effect when the `osFamily` is bottlerocket.
type BottlerocketConfiguration struct {
// Kubernetes defines the Kubernetes settings on the host OS.
// +optional
Kubernetes *v1beta1.BottlerocketKubernetesSettings `json:"kubernetes,omitempty"`
// Kernel defines the kernel settings for bottlerocket.
Kernel *v1beta1.BottlerocketKernelSettings `json:"kernel,omitempty"`
// Boot defines the boot settings for bottlerocket.
Boot *v1beta1.BottlerocketBootSettings `json:"boot,omitempty"`
}
// Cert defines additional trusted cert bundles on the host OS.
type certBundle struct {
// Name defines the cert bundle name.
Name string `json:"name"`
// Data defines the cert bundle data.
Data string `json:"data"`
}
| 45 |
eks-anywhere | aws | Go | package v1alpha1
import "fmt"
type OSFamily string
const (
Ubuntu OSFamily = "ubuntu"
Bottlerocket OSFamily = "bottlerocket"
RedHat OSFamily = "redhat"
)
// UserConfiguration defines the configuration of the user to be added to the VM.
type UserConfiguration struct {
Name string `json:"name"`
SshAuthorizedKeys []string `json:"sshAuthorizedKeys"`
}
func defaultMachineConfigUsers(defaultUsername string, users []UserConfiguration) []UserConfiguration {
if len(users) <= 0 {
users = []UserConfiguration{{}}
}
if len(users[0].SshAuthorizedKeys) <= 0 {
users[0].SshAuthorizedKeys = []string{""}
}
if users[0].Name == "" {
users[0].Name = defaultUsername
}
return users
}
func validateMachineConfigUsers(machineConfigName string, machineConfigKind string, users []UserConfiguration) error {
if len(users) == 0 {
return fmt.Errorf("users is not set for %s %s, please provide a user", machineConfigKind, machineConfigName)
}
if users[0].Name == "" {
return fmt.Errorf("users[0].name is not set or is empty for %s %s, please provide a username", machineConfigKind, machineConfigName)
}
if len(users[0].SshAuthorizedKeys) == 0 || users[0].SshAuthorizedKeys[0] == "" {
return fmt.Errorf("users[0].SshAuthorizedKeys is not set or is empty for %s %s, please provide a valid ssh authorized key for user %s", machineConfigKind, machineConfigName, users[0].Name)
}
return nil
}
| 45 |
eks-anywhere | aws | Go | package v1alpha1
// Marshallable represents all "generate" CRDs structs
// +kubebuilder:object:generate=false
type Marshallable interface{}
| 6 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/constants"
)
const NutanixDatacenterKind = "NutanixDatacenterConfig"
// NewNutanixDatacenterConfigGenerate is used for generating yaml for generate clusterconfig command.
func NewNutanixDatacenterConfigGenerate(clusterName string) *NutanixDatacenterConfigGenerate {
return &NutanixDatacenterConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: NutanixDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: NutanixDatacenterConfigSpec{
Endpoint: "<enter Prism Central Endpoint (FQDN or IP) here>",
Port: 9440,
CredentialRef: &Ref{
Kind: constants.SecretKind,
Name: constants.NutanixCredentialsName,
},
},
}
}
func (c *NutanixDatacenterConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *NutanixDatacenterConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *NutanixDatacenterConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
// GetNutanixDatacenterConfig parses config in a yaml file and returns a NutanixDatacenterConfig object.
func GetNutanixDatacenterConfig(fileName string) (*NutanixDatacenterConfig, error) {
var clusterConfig NutanixDatacenterConfig
err := ParseClusterConfig(fileName, &clusterConfig)
if err != nil {
return nil, err
}
return &clusterConfig, nil
}
| 53 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
)
func TestGetNutanixDatacenterConfigInvalidConfig(t *testing.T) {
tests := []struct {
name string
fileName string
expectedErr string
}{
{
name: "non-existent-file",
fileName: "testdata/nutanix/non-existent-file.yaml",
expectedErr: "open testdata/nutanix/non-existent-file.yaml: no such file or directory",
},
{
name: "invalid-file",
fileName: "testdata/invalid_format.yaml",
expectedErr: "unable to parse testdata/invalid_format.yaml",
},
{
name: "invalid-cluster-extraneous-field",
fileName: "testdata/nutanix/invalid-cluster.yaml",
expectedErr: "unknown field \"idont\"",
},
{
name: "invalid-kind",
fileName: "testdata/nutanix/invalid-kind.yaml",
expectedErr: "does not contain kind NutanixDatacenterConfig",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf, err := v1alpha1.GetNutanixDatacenterConfig(test.fileName)
assert.Error(t, err)
assert.Nil(t, conf)
assert.Contains(t, err.Error(), test.expectedErr, "expected error", test.expectedErr, "got error", err)
})
}
}
func TestGetNutanixDatacenterConfigValidConfig(t *testing.T) {
expectedDCConf := &v1alpha1.NutanixDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.NutanixDatacenterKind,
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Namespace: constants.DefaultNamespace,
},
Spec: v1alpha1.NutanixDatacenterConfigSpec{
Endpoint: "prism.nutanix.com",
Port: 9440,
CredentialRef: &v1alpha1.Ref{
Name: "eksa-unit-test",
Kind: constants.SecretKind,
},
},
}
tests := []struct {
name string
fileName string
assertions func(*testing.T, *v1alpha1.NutanixDatacenterConfig)
}{
{
name: "valid-cluster",
fileName: "testdata/nutanix/valid-cluster.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
assert.NoError(t, dcConf.Validate())
assert.Equal(t, expectedDCConf, dcConf)
},
},
{
name: "valid-cluster-extra-delimiter",
fileName: "testdata/nutanix/valid-cluster-extra-delimiter.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
assert.NoError(t, dcConf.Validate())
},
},
{
name: "valid-cluster-setters-getters",
fileName: "testdata/nutanix/valid-cluster.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
assert.Equal(t, dcConf.ExpectedKind(), dcConf.Kind())
assert.False(t, dcConf.IsReconcilePaused())
dcConf.PauseReconcile()
assert.True(t, dcConf.IsReconcilePaused())
dcConf.ClearPauseAnnotation()
assert.False(t, dcConf.IsReconcilePaused())
},
},
{
name: "valid-cluster-marshal",
fileName: "testdata/nutanix/valid-cluster.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
m := dcConf.Marshallable()
require.NotNil(t, m)
y, err := yaml.Marshal(m)
assert.NoError(t, err)
assert.NotNil(t, y)
},
},
{
name: "datacenterconfig-valid-trust-bundle",
fileName: "testdata/nutanix/datacenterconfig-valid-trustbundle.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
assert.NoError(t, dcConf.Validate())
},
},
{
name: "datacenterconfig-invalid-trust-bundle",
fileName: "testdata/nutanix/datacenterconfig-invalid-trustbundle.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
err := dcConf.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "NutanixDatacenterConfig additionalTrustBundle is not valid")
},
},
{
name: "datacenterconfig-non-pem-trust-bundle",
fileName: "testdata/nutanix/datacenterconfig-non-pem-trustbundle.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
err := dcConf.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "could not find a PEM block in the certificate")
},
},
{
name: "datacenterconfig-empty-endpoint",
fileName: "testdata/nutanix/datacenterconfig-empty-endpoint.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
err := dcConf.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "NutanixDatacenterConfig endpoint is not set or is empty")
},
},
{
name: "datacenterconfig-invalid-port",
fileName: "testdata/nutanix/datacenterconfig-invalid-port.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
err := dcConf.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "NutanixDatacenterConfig port is not set or is empty")
},
},
{
name: "datecenterconfig-credentialref-invalid-kind",
fileName: "testdata/nutanix/invalid-credentialref-kind.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
err := dcConf.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "NutanixDatacenterConfig credentialRef Kind (ConfigMap) is not a secret")
},
},
{
name: "datecenterconfig-credentialref-invalid-kind",
fileName: "testdata/nutanix/empty-credentialref-name.yaml",
assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
err := dcConf.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "NutanixDatacenterConfig credentialRef name is not set or is empty")
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf, err := v1alpha1.GetNutanixDatacenterConfig(test.fileName)
assert.NoError(t, err)
require.NotNil(t, conf)
test.assertions(t, conf)
})
}
}
func TestNewNutanixDatacenterConfigGenerate(t *testing.T) {
dcConfGen := v1alpha1.NewNutanixDatacenterConfigGenerate("eksa-unit-test")
require.NotNil(t, dcConfGen)
assert.Equal(t, "eksa-unit-test", dcConfGen.Name())
assert.Equal(t, v1alpha1.NutanixDatacenterKind, dcConfGen.Kind())
assert.Equal(t, v1alpha1.SchemeBuilder.GroupVersion.String(), dcConfGen.APIVersion())
assert.Equal(t, constants.NutanixCredentialsName, dcConfGen.Spec.CredentialRef.Name)
}
func TestNutanixDatacenterConfigSetDefaults(t *testing.T) {
dcConf := &v1alpha1.NutanixDatacenterConfig{}
dcConf.SetDefaults()
assert.Equal(t, constants.NutanixCredentialsName, dcConf.Spec.CredentialRef.Name)
assert.Equal(t, constants.SecretKind, dcConf.Spec.CredentialRef.Kind)
}
| 205 |
eks-anywhere | aws | Go | // Important: Run "make generate" to regenerate code after modifying this file
// json tags are required; new fields must have json tags for the fields to be serialized
package v1alpha1
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/constants"
)
// NutanixDatacenterConfigSpec defines the desired state of NutanixDatacenterConfig.
type NutanixDatacenterConfigSpec struct {
// Endpoint is the Endpoint of Nutanix Prism Central
// +kubebuilder:validation:Required
Endpoint string `json:"endpoint"`
// Port is the Port of Nutanix Prism Central
// +kubebuilder:validation:Required
// +kubebuilder:validation:Default=9440
Port int `json:"port"`
// AdditionalTrustBundle is the optional PEM-encoded certificate bundle for
// users that configured their Prism Central with certificates from non-publicly
// trusted CAs
AdditionalTrustBundle string `json:"additionalTrustBundle,omitempty"`
// Insecure is the optional flag to skip TLS verification. Nutanix Prism
// Central installation by default ships with a self-signed certificate
// that will fail TLS verification because the certificate is not issued by
// a public CA and does not have the IP SANs with the Prism Central endpoint.
// To accommodate the scenario where the user has not changed the default
// Certificate that ships with Prism Central, we allow the user to skip TLS
// verification. This is not recommended for production use.
Insecure bool `json:"insecure,omitempty"`
// CredentialRef is the reference to the secret name that contains the credentials
// for the Nutanix Prism Central. The namespace for the secret is assumed to be a constant i.e. eksa-system.
// +optional
CredentialRef *Ref `json:"credentialRef,omitempty"`
}
// NutanixDatacenterConfigStatus defines the observed state of NutanixDatacenterConfig.
type NutanixDatacenterConfigStatus struct{}
// NutanixDatacenterConfig is the Schema for the NutanixDatacenterConfigs API
//
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
type NutanixDatacenterConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NutanixDatacenterConfigSpec `json:"spec,omitempty"`
Status NutanixDatacenterConfigStatus `json:"status,omitempty"`
}
func (in *NutanixDatacenterConfig) Kind() string {
return in.TypeMeta.Kind
}
func (in *NutanixDatacenterConfig) ExpectedKind() string {
return NutanixDatacenterKind
}
func (in *NutanixDatacenterConfig) PauseReconcile() {
if in.Annotations == nil {
in.Annotations = map[string]string{}
}
in.Annotations[pausedAnnotation] = "true"
}
func (in *NutanixDatacenterConfig) IsReconcilePaused() bool {
if s, ok := in.Annotations[pausedAnnotation]; ok {
return s == "true"
}
return false
}
func (in *NutanixDatacenterConfig) ClearPauseAnnotation() {
if in.Annotations != nil {
delete(in.Annotations, pausedAnnotation)
}
}
func (in *NutanixDatacenterConfig) ConvertConfigToConfigGenerateStruct() *NutanixDatacenterConfigGenerate {
namespace := defaultEksaNamespace
if in.Namespace != "" {
namespace = in.Namespace
}
config := &NutanixDatacenterConfigGenerate{
TypeMeta: in.TypeMeta,
ObjectMeta: ObjectMeta{
Name: in.Name,
Annotations: in.Annotations,
Namespace: namespace,
},
Spec: in.Spec,
}
return config
}
func (in *NutanixDatacenterConfig) Marshallable() Marshallable {
return in.ConvertConfigToConfigGenerateStruct()
}
func (in *NutanixDatacenterConfig) Validate() error {
if len(in.Spec.Endpoint) <= 0 {
return errors.New("NutanixDatacenterConfig endpoint is not set or is empty")
}
if in.Spec.Port == 0 {
return errors.New("NutanixDatacenterConfig port is not set or is empty")
}
if len(in.Spec.AdditionalTrustBundle) > 0 {
certPem := []byte(in.Spec.AdditionalTrustBundle)
block, _ := pem.Decode(certPem)
if block == nil {
return errors.New("NutanixDatacenterConfig additionalTrustBundle is not valid: could not find a PEM block in the certificate")
}
if _, err := x509.ParseCertificates(block.Bytes); err != nil {
return fmt.Errorf("NutanixDatacenterConfig additionalTrustBundle is not valid: %s", err)
}
}
if in.Spec.CredentialRef != nil {
if in.Spec.CredentialRef.Kind != constants.SecretKind {
return fmt.Errorf("NutanixDatacenterConfig credentialRef Kind (%s) is not a secret", in.Spec.CredentialRef.Kind)
}
if len(in.Spec.CredentialRef.Name) <= 0 {
return errors.New("NutanixDatacenterConfig credentialRef name is not set or is empty")
}
}
return nil
}
// SetDefaults sets default values for the NutanixDatacenterConfig object.
func (in *NutanixDatacenterConfig) SetDefaults() {
if in.Spec.CredentialRef == nil {
in.Spec.CredentialRef = &Ref{
Kind: constants.SecretKind,
Name: constants.NutanixCredentialsName,
}
}
}
// NutanixDatacenterConfigGenerate is same as NutanixDatacenterConfig except stripped down for generation of yaml file during generate clusterconfig
//
// +kubebuilder:object:generate=false
type NutanixDatacenterConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec NutanixDatacenterConfigSpec `json:"spec,omitempty"`
}
// NutanixDatacenterConfigList contains a list of NutanixDatacenterConfig
//
// +kubebuilder:object:root=true
type NutanixDatacenterConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []NutanixDatacenterConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&NutanixDatacenterConfig{}, &NutanixDatacenterConfigList{})
}
| 178 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// nutanixdatacenterconfiglog is for logging in this package.
var nutanixdatacenterconfiglog = logf.Log.WithName("nutanixdatacenterconfig-resource")
// SetupWebhookWithManager sets up the webhook with the manager.
func (r *NutanixDatacenterConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-nutanixdatacenterconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=nutanixdatacenterconfigs,verbs=create;update,versions=v1alpha1,name=validation.nutanixdatacenterconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &NutanixDatacenterConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *NutanixDatacenterConfig) ValidateCreate() error {
nutanixdatacenterconfiglog.Info("validate create", "name", r.Name)
if r.IsReconcilePaused() {
nutanixdatacenterconfiglog.Info("NutanixDatacenterConfig is paused, allowing create", "name", r.Name)
return nil
}
if r.Spec.CredentialRef == nil {
return apierrors.NewInvalid(
GroupVersion.WithKind(NutanixDatacenterKind).GroupKind(),
r.Name,
field.ErrorList{
field.Invalid(field.NewPath("spec"), r.Spec, "credentialRef is required to be set to create a new NutanixDatacenterConfig"),
})
}
if err := r.Validate(); err != nil {
return apierrors.NewInvalid(
GroupVersion.WithKind(NutanixDatacenterKind).GroupKind(),
r.Name,
field.ErrorList{
field.Invalid(field.NewPath("spec"), r.Spec, err.Error()),
})
}
return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *NutanixDatacenterConfig) ValidateUpdate(old runtime.Object) error {
nutanixdatacenterconfiglog.Info("validate update", "name", r.Name)
oldDatacenterConfig, ok := old.(*NutanixDatacenterConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a NutanixDatacenterConfig but got a %T", old))
}
if oldDatacenterConfig.IsReconcilePaused() {
nutanixdatacenterconfiglog.Info("NutanixDatacenterConfig is paused, allowing update", "name", r.Name)
return nil
}
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableFieldsNutanixDatacenterConfig(r, oldDatacenterConfig)...)
if r.Spec.CredentialRef == nil {
// check if the old object has a credentialRef set
if oldDatacenterConfig.Spec.CredentialRef != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.credentialRef"), "credentialRef cannot be removed from an existing NutanixDatacenterConfig"))
}
}
if err := r.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), r.Spec, err.Error()))
}
if len(allErrs) > 0 {
return apierrors.NewInvalid(
GroupVersion.WithKind(NutanixDatacenterKind).GroupKind(),
r.Name,
allErrs)
}
return nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *NutanixDatacenterConfig) ValidateDelete() error {
nutanixdatacenterconfiglog.Info("validate delete", "name", r.Name)
return nil
}
func validateImmutableFieldsNutanixDatacenterConfig(new, old *NutanixDatacenterConfig) field.ErrorList {
var allErrs field.ErrorList
specPath := field.NewPath("spec")
if old.IsReconcilePaused() {
nutanixmachineconfiglog.Info("Reconciliation is paused")
return nil
}
if new.Spec.Endpoint != old.Spec.Endpoint {
allErrs = append(allErrs, field.Forbidden(specPath.Child("endpoint"), "field is immutable"))
}
return allErrs
}
| 116 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"os"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test/envtest"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
)
func TestNutanixDatacenterConfigWebhooksValidateCreate(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
g.Expect(dcConf.ValidateCreate()).To(Succeed())
}
func TestNutanixDatacenterConfigWebhooksValidateCreateReconcilePaused(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
dcConf.Annotations = map[string]string{
"anywhere.eks.amazonaws.com/paused": "true",
}
g.Expect(dcConf.ValidateCreate()).To(Succeed())
}
func TestNutanixDatacenterConfigWebhookValidateCreateNoCredentialRef(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
dcConf.Spec.CredentialRef = nil
g.Expect(dcConf.ValidateCreate().Error()).To(ContainSubstring("credentialRef is required to be set to create a new NutanixDatacenterConfig"))
}
func TestNutanixDatacenterConfigWebhooksValidateCreateValidaitonFailure(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
dcConf.Spec.Endpoint = ""
g.Expect(dcConf.ValidateCreate()).To(Not(Succeed()))
}
func TestNutanixDatacenterConfigWebhooksValidateUpdate(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
g.Expect(dcConf.ValidateCreate()).To(Succeed())
newSpec := nutanixDatacenterConfig()
newSpec.Spec.CredentialRef.Name = "new-credential"
g.Expect(dcConf.ValidateUpdate(newSpec)).To(Succeed())
}
func TestNutanixDatacenterConfigWebhooksValidateUpdateReconcilePaused(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
g.Expect(dcConf.ValidateCreate()).To(Succeed())
oldSpec := nutanixDatacenterConfig()
oldSpec.Annotations = map[string]string{
"anywhere.eks.amazonaws.com/paused": "true",
}
oldSpec.Spec.CredentialRef.Name = "new-credential"
g.Expect(dcConf.ValidateUpdate(oldSpec)).To(Succeed())
}
func TestNutanixDatacenterConfigWebhooksValidateUpdateValidationFailure(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
g.Expect(dcConf.ValidateCreate()).To(Succeed())
newSpec := nutanixDatacenterConfig()
newSpec.Spec.Endpoint = ""
g.Expect(dcConf.ValidateUpdate(newSpec)).To(Not(Succeed()))
}
func TestNutanixDatacenterConfigWebhooksValidateUpdateInvalidOldObject(t *testing.T) {
g := NewWithT(t)
newConf := nutanixDatacenterConfig()
newConf.Spec.CredentialRef = nil
g.Expect(newConf.ValidateUpdate(&v1alpha1.NutanixMachineConfig{}).Error()).To(ContainSubstring("expected a NutanixDatacenterConfig but got a *v1alpha1.NutanixMachineConfig"))
}
func TestNutanixDatacenterConfigWebhooksValidateUpdateCredentialRefRemoved(t *testing.T) {
g := NewWithT(t)
oldConf := nutanixDatacenterConfig()
g.Expect(oldConf.ValidateCreate()).To(Succeed())
newConf := nutanixDatacenterConfig()
newConf.Spec.CredentialRef = nil
g.Expect(newConf.ValidateUpdate(oldConf).Error()).To(ContainSubstring("credentialRef cannot be removed from an existing NutanixDatacenterConfig"))
}
func TestNutanixDatacenterConfigWebhooksValidateDelete(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
g.Expect(dcConf.ValidateCreate()).To(Succeed())
g.Expect(dcConf.ValidateDelete()).To(Succeed())
}
func TestNutanixDatacenterConfigSetupWebhookWithManager(t *testing.T) {
g := NewWithT(t)
dcConf := nutanixDatacenterConfig()
g.Expect(dcConf.SetupWebhookWithManager(env.Manager())).To(Succeed())
}
func nutanixDatacenterConfig() *v1alpha1.NutanixDatacenterConfig {
return &v1alpha1.NutanixDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "nutanix-datacenter-config",
},
Spec: v1alpha1.NutanixDatacenterConfigSpec{
Endpoint: "prism.nutanix.com",
Port: constants.DefaultNutanixPrismCentralPort,
CredentialRef: &v1alpha1.Ref{
Kind: constants.SecretKind,
Name: constants.NutanixCredentialsName,
},
},
}
}
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 124 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"os"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// NutanixIdentifierType is an enumeration of different resource identifier types.
type NutanixIdentifierType string
func (c NutanixIdentifierType) String() string {
return string(c)
}
const (
// NutanixMachineConfigKind is the kind for a NutanixMachineConfig.
NutanixMachineConfigKind = "NutanixMachineConfig"
// NutanixIdentifierUUID is a resource identifier identifying the object by UUID.
NutanixIdentifierUUID NutanixIdentifierType = "uuid"
// NutanixIdentifierName is a resource identifier identifying the object by Name.
NutanixIdentifierName NutanixIdentifierType = "name"
defaultNutanixOSFamily = Ubuntu
defaultNutanixSystemDiskSizeGi = "40Gi"
defaultNutanixMemorySizeGi = "4Gi"
defaultNutanixVCPUsPerSocket = 1
defaultNutanixVCPUSockets = 2
// DefaultNutanixMachineConfigUser is the default username we set in machine config.
DefaultNutanixMachineConfigUser string = "eksa"
)
// NutanixResourceIdentifier holds the identity of a Nutanix Prism resource (cluster, image, subnet, etc.)
//
// +union.
type NutanixResourceIdentifier struct {
// Type is the identifier type to use for this resource.
// +kubebuilder:validation:Required
// +kubebuilder:validation:Enum:=uuid;name
Type NutanixIdentifierType `json:"type"`
// uuid is the UUID of the resource in the PC.
// +optional
UUID *string `json:"uuid,omitempty"`
// name is the resource name in the PC
// +optional
Name *string `json:"name,omitempty"`
}
// NutanixCategoryIdentifier holds the identity of a Nutanix Prism Central category.
type NutanixCategoryIdentifier struct {
// key is the Key of the category in the Prism Central.
// +kubebuilder:validation:Required
Key string `json:"key,omitempty"`
// value is the category value linked to the key in the Prism Central.
// +kubebuilder:validation:Required
Value string `json:"value,omitempty"`
}
// NutanixMachineConfigGenerateOpt is a functional option that can be passed to NewNutanixMachineConfigGenerate to
// customize the generated machine config
//
// +kubebuilder:object:generate=false
type NutanixMachineConfigGenerateOpt func(config *NutanixMachineConfigGenerate)
// NewNutanixMachineConfigGenerate returns a new instance of NutanixMachineConfigGenerate
// used for generating yaml for generate clusterconfig command.
func NewNutanixMachineConfigGenerate(name string, opts ...NutanixMachineConfigGenerateOpt) *NutanixMachineConfigGenerate {
enterNameString := "<Enter %s name here>"
machineConfig := &NutanixMachineConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: NutanixMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: name,
},
Spec: NutanixMachineConfigSpec{
OSFamily: defaultNutanixOSFamily,
Users: []UserConfiguration{
{
Name: DefaultNutanixMachineConfigUser,
SshAuthorizedKeys: []string{"ssh-rsa AAAA..."},
},
},
VCPUsPerSocket: defaultNutanixVCPUsPerSocket,
VCPUSockets: defaultNutanixVCPUSockets,
MemorySize: resource.MustParse(defaultNutanixMemorySizeGi),
Image: NutanixResourceIdentifier{Type: NutanixIdentifierName, Name: func() *string { s := fmt.Sprintf(enterNameString, "image"); return &s }()},
Cluster: NutanixResourceIdentifier{Type: NutanixIdentifierName, Name: func() *string { s := fmt.Sprintf(enterNameString, "Prism Element cluster"); return &s }()},
Subnet: NutanixResourceIdentifier{Type: NutanixIdentifierName, Name: func() *string { s := fmt.Sprintf(enterNameString, "subnet"); return &s }()},
SystemDiskSize: resource.MustParse(defaultNutanixSystemDiskSizeGi),
},
}
for _, opt := range opts {
opt(machineConfig)
}
return machineConfig
}
func (c *NutanixMachineConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *NutanixMachineConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *NutanixMachineConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
func GetNutanixMachineConfigs(fileName string) (map[string]*NutanixMachineConfig, error) {
configs := make(map[string]*NutanixMachineConfig)
content, err := os.ReadFile(fileName)
if err != nil {
return nil, fmt.Errorf("unable to read file due to: %v", err)
}
for _, c := range strings.Split(string(content), YamlSeparator) {
config := NutanixMachineConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
},
}
if err = yaml.UnmarshalStrict([]byte(c), &config); err == nil {
if config.Kind == NutanixMachineConfigKind {
configs[config.Name] = &config
continue
}
}
_ = yaml.Unmarshal([]byte(c), &config) // this is to check if there is a bad spec in the file
if config.Kind == NutanixMachineConfigKind {
return nil, fmt.Errorf("unable to unmarshall content from file due to: %v", err)
}
}
if len(configs) == 0 {
return nil, fmt.Errorf("unable to find kind %v in file", NutanixMachineConfigKind)
}
return configs, nil
}
func setNutanixMachineConfigDefaults(machineConfig *NutanixMachineConfig) {
initUser := UserConfiguration{
Name: DefaultNutanixMachineConfigUser,
SshAuthorizedKeys: []string{""},
}
if machineConfig.Spec.Users == nil || len(machineConfig.Spec.Users) <= 0 {
machineConfig.Spec.Users = []UserConfiguration{initUser}
}
user := machineConfig.Spec.Users[0]
if user.Name == "" {
machineConfig.Spec.Users[0].Name = DefaultNutanixMachineConfigUser
}
if user.SshAuthorizedKeys == nil || len(user.SshAuthorizedKeys) <= 0 {
machineConfig.Spec.Users[0].SshAuthorizedKeys = []string{""}
}
if machineConfig.Spec.OSFamily == "" {
machineConfig.Spec.OSFamily = defaultNutanixOSFamily
}
}
func validateNutanixMachineConfig(c *NutanixMachineConfig) error {
if err := validateObjectMeta(c.ObjectMeta); err != nil {
return fmt.Errorf("NutanixMachineConfig: %v", err)
}
if err := validateNutanixReferences(c); err != nil {
return fmt.Errorf("NutanixMachineConfig: %v", err)
}
if err := validateMinimumNutanixMachineSpecs(c); err != nil {
return fmt.Errorf("NutanixMachineConfig: %v", err)
}
if c.Spec.OSFamily != Ubuntu {
return fmt.Errorf(
"NutanixMachineConfig: unsupported spec.osFamily (%v); Please use one of the following: %s",
c.Spec.OSFamily,
Ubuntu,
)
}
if err := validateMachineConfigUsers(c.Name, NutanixMachineConfigKind, c.Spec.Users); err != nil {
return err
}
return nil
}
func validateMinimumNutanixMachineSpecs(c *NutanixMachineConfig) error {
if c.Spec.VCPUSockets < defaultNutanixVCPUSockets {
return fmt.Errorf("NutanixMachineConfig: vcpu sockets must be greater than or equal to %d", defaultNutanixVCPUSockets)
}
if c.Spec.VCPUsPerSocket < defaultNutanixVCPUsPerSocket {
return fmt.Errorf("NutanixMachineConfig: vcpu per socket must be greater than or equal to %d", defaultNutanixVCPUsPerSocket)
}
if c.Spec.MemorySize.Cmp(resource.MustParse(defaultNutanixMemorySizeGi)) < 0 {
return fmt.Errorf("NutanixMachineConfig: memory size must be greater than or equal to %s", defaultNutanixMemorySizeGi)
}
if c.Spec.SystemDiskSize.Cmp(resource.MustParse(defaultNutanixSystemDiskSizeGi)) < 0 {
return fmt.Errorf("NutanixMachineConfig: system disk size must be greater than %s", defaultNutanixSystemDiskSizeGi)
}
return nil
}
func validateNutanixReferences(c *NutanixMachineConfig) error {
if err := validateNutanixResourceReference(&c.Spec.Subnet, "subnet", c.Name); err != nil {
return err
}
if err := validateNutanixResourceReference(&c.Spec.Cluster, "cluster", c.Name); err != nil {
return err
}
if err := validateNutanixResourceReference(&c.Spec.Image, "image", c.Name); err != nil {
return err
}
if c.Spec.Project != nil {
if err := validateNutanixResourceReference(c.Spec.Project, "project", c.Name); err != nil {
return err
}
}
if len(c.Spec.AdditionalCategories) > 0 {
if err := validateNutanixCategorySlice(c.Spec.AdditionalCategories, c.Name); err != nil {
return err
}
}
return nil
}
func validateNutanixResourceReference(i *NutanixResourceIdentifier, resource string, mcName string) error {
if i.Type != NutanixIdentifierName && i.Type != NutanixIdentifierUUID {
return fmt.Errorf("NutanixMachineConfig: invalid identifier type for %s: %s", resource, i.Type)
}
if i.Type == NutanixIdentifierName && i.Name == nil {
return fmt.Errorf("NutanixMachineConfig: missing %s name: %s", resource, mcName)
} else if i.Type == NutanixIdentifierUUID && i.UUID == nil {
return fmt.Errorf("NutanixMachineConfig: missing %s UUID: %s", resource, mcName)
}
return nil
}
func validateNutanixCategorySlice(i []NutanixCategoryIdentifier, mcName string) error {
for _, category := range i {
if category.Key == "" {
return fmt.Errorf("NutanixMachineConfig: missing category key: %s", mcName)
}
if category.Value == "" {
return fmt.Errorf("NutanixMachineConfig: missing category value for key %s: %s", category.Key, mcName)
}
}
return nil
}
| 279 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"reflect"
"testing"
"github.com/aws/smithy-go/ptr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
func TestGetNutanixMachineConfigsInvalidConfig(t *testing.T) {
tests := []struct {
name string
fileName string
expectedErr string
}{
{
name: "non-existent-file",
fileName: "testdata/nutanix/non-existent-file.yaml",
expectedErr: "open testdata/nutanix/non-existent-file.yaml: no such file or directory",
},
{
name: "invalid-file",
fileName: "testdata/invalid_format.yaml",
expectedErr: "unable to find kind NutanixMachineConfig in file",
},
{
name: "invalid-cluster-extraneuous-field",
fileName: "testdata/nutanix/invalid-cluster.yaml",
expectedErr: "unknown field \"idont\"",
},
{
name: "invalid kind",
fileName: "testdata/nutanix/invalid-kind.yaml",
expectedErr: "unable to find kind NutanixMachineConfig in file",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf, err := GetNutanixMachineConfigs(test.fileName)
assert.Error(t, err)
assert.Nil(t, conf)
assert.Contains(t, err.Error(), test.expectedErr, "expected error", test.expectedErr, "got error", err)
})
}
}
func TestGetNutanixMachineConfigsValidConfig(t *testing.T) {
expectedMachineConfig := &NutanixMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: NutanixMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Annotations: map[string]string{},
Namespace: defaultEksaNamespace,
},
Spec: NutanixMachineConfigSpec{
SystemDiskSize: resource.MustParse("40Gi"),
MemorySize: resource.MustParse("8Gi"),
VCPUSockets: 4,
VCPUsPerSocket: 1,
OSFamily: Ubuntu,
Image: NutanixResourceIdentifier{
Type: NutanixIdentifierName,
Name: ptr.String("prism-image"),
},
Cluster: NutanixResourceIdentifier{
Type: NutanixIdentifierName,
Name: ptr.String("prism-element"),
},
Subnet: NutanixResourceIdentifier{
Type: NutanixIdentifierName,
Name: ptr.String("prism-subnet"),
},
Users: []UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
}
const machineConfName = "eksa-unit-test"
tests := []struct {
name string
fileName string
machineConf map[string]*NutanixMachineConfig
assertions func(t *testing.T, machineConf *NutanixMachineConfig)
}{
{
name: "valid-cluster",
fileName: "testdata/nutanix/valid-cluster.yaml",
machineConf: map[string]*NutanixMachineConfig{
machineConfName: expectedMachineConfig,
},
assertions: func(t *testing.T, machineConf *NutanixMachineConfig) {},
},
{
name: "valid-cluster-extra-delimiter",
fileName: "testdata/nutanix/valid-cluster-extra-delimiter.yaml",
machineConf: map[string]*NutanixMachineConfig{
machineConfName: expectedMachineConfig,
},
assertions: func(t *testing.T, machineConf *NutanixMachineConfig) {},
},
{
name: "valid-cluster-setters-getters",
fileName: "testdata/nutanix/valid-cluster.yaml",
machineConf: map[string]*NutanixMachineConfig{
machineConfName: expectedMachineConfig,
},
assertions: func(t *testing.T, machineConf *NutanixMachineConfig) {
assert.False(t, machineConf.IsReconcilePaused())
machineConf.PauseReconcile()
assert.True(t, machineConf.IsReconcilePaused())
assert.False(t, machineConf.IsEtcd())
machineConf.SetEtcd()
assert.True(t, machineConf.IsEtcd())
assert.False(t, machineConf.IsManaged())
machineConf.Annotations = nil
machineConf.SetManagedBy(machineConfName)
assert.True(t, machineConf.IsManaged())
assert.False(t, machineConf.IsControlPlane())
machineConf.SetControlPlane()
assert.True(t, machineConf.IsControlPlane())
assert.Equal(t, Ubuntu, machineConf.OSFamily())
assert.Equal(t, defaultEksaNamespace, machineConf.GetNamespace())
assert.Equal(t, machineConfName, machineConf.GetName())
},
},
{
name: "valid-cluster-marshal",
fileName: "testdata/nutanix/valid-cluster.yaml",
machineConf: map[string]*NutanixMachineConfig{
machineConfName: expectedMachineConfig,
},
assertions: func(t *testing.T, machineConf *NutanixMachineConfig) {
m := machineConf.Marshallable()
require.NotNil(t, m)
y, err := yaml.Marshal(m)
assert.NoError(t, err)
assert.NotNil(t, y)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf, err := GetNutanixMachineConfigs(test.fileName)
assert.NoError(t, err)
require.NotNil(t, conf)
assert.True(t, reflect.DeepEqual(test.machineConf, conf))
test.assertions(t, conf[machineConfName])
})
}
}
func TestNewNutanixMachineConfigGenerate(t *testing.T) {
machineConf := NewNutanixMachineConfigGenerate("eksa-unit-test", func(config *NutanixMachineConfigGenerate) {
config.Spec.MemorySize = resource.MustParse("16Gi")
})
require.NotNil(t, machineConf)
assert.Equal(t, "eksa-unit-test", machineConf.Name())
assert.Equal(t, NutanixMachineConfigKind, machineConf.Kind())
assert.Equal(t, SchemeBuilder.GroupVersion.String(), machineConf.APIVersion())
assert.Equal(t, resource.MustParse("16Gi"), machineConf.Spec.MemorySize)
}
func TestNutanixMachineConfigDefaults(t *testing.T) {
tests := []struct {
name string
fileName string
validate func(t *testing.T, nutanixMachineConfig *NutanixMachineConfig) error
}{
{
name: "machineconfig-with-no-users",
fileName: "testdata/nutanix/machineconfig-with-no-users.yaml",
validate: func(t *testing.T, nutanixMachineConfig *NutanixMachineConfig) error {
if len(nutanixMachineConfig.Spec.Users) <= 0 {
return fmt.Errorf("default user was not added")
}
return nil
},
},
{
name: "machineconfig-with-no-user-name",
fileName: "testdata/nutanix/machineconfig-with-no-user-name.yaml",
validate: func(t *testing.T, nutanixMachineConfig *NutanixMachineConfig) error {
if len(nutanixMachineConfig.Spec.Users[0].Name) <= 0 {
return fmt.Errorf("default user name was not added")
}
return nil
},
},
{
name: "machineconfig-with-no-osfamily",
fileName: "testdata/nutanix/machineconfig-with-no-osfamily.yaml",
validate: func(t *testing.T, nutanixMachineConfig *NutanixMachineConfig) error {
if nutanixMachineConfig.Spec.OSFamily != defaultNutanixOSFamily {
return fmt.Errorf("ubuntu OS family was not set")
}
return nil
},
},
{
name: "machineconfig-with-no-ssh-key",
fileName: "testdata/nutanix/machineconfig-with-no-ssh-key.yaml",
validate: func(t *testing.T, nutanixMachineConfig *NutanixMachineConfig) error {
if len(nutanixMachineConfig.Spec.Users[0].SshAuthorizedKeys) <= 0 {
return fmt.Errorf("default ssh key was not added")
}
if nutanixMachineConfig.Spec.Users[0].SshAuthorizedKeys[0] == "" {
return nil
}
return nil
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf, err := GetNutanixMachineConfigs(test.fileName)
if err != nil {
t.Fatalf("GetNutanixMachineConfigs returned error")
}
if conf == nil {
t.Fatalf("GetNutanixMachineConfigs returned conf without defaults")
}
nutanixMachineConfig := conf["eksa-unit-test"]
if nutanixMachineConfig == nil {
t.Fatalf("Invalid yaml found")
}
nutanixMachineConfig.SetDefaults()
err = test.validate(t, nutanixMachineConfig)
if err != nil {
t.Fatalf("validate failed with error :%s", err)
}
})
}
}
func TestValidateNutanixMachineConfig(t *testing.T) {
tests := []struct {
name string
fileName string
expectedErr string
}{
{
name: "invalid-machineconfig-addtional-categories-key",
fileName: "testdata/nutanix/invalid-machineconfig-addtional-categories-key.yaml",
expectedErr: "NutanixMachineConfig: missing category key",
},
{
name: "invalid-machineconfig-addtional-categories-value",
fileName: "testdata/nutanix/invalid-machineconfig-addtional-categories-value.yaml",
expectedErr: "NutanixMachineConfig: missing category value",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf, err := GetNutanixMachineConfigs(test.fileName)
if err != nil {
t.Fatalf("GetNutanixMachineConfigs returned error")
}
if conf == nil {
t.Fatalf("GetNutanixMachineConfigs returned conf without defaults")
}
nutanixMachineConfig := conf["eksa-unit-test"]
if nutanixMachineConfig == nil {
t.Fatalf("Invalid yaml found")
}
err = nutanixMachineConfig.Validate()
if err == nil {
t.Fatalf("validate should have failed")
}
assert.Contains(t, err.Error(), test.expectedErr, "expected error", test.expectedErr, "got error", err)
})
}
}
| 294 |
eks-anywhere | aws | Go | // Important: Run "make generate" to regenerate code after modifying this file
// json tags are required; new fields must have json tags for the fields to be serialized
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// NutanixMachineConfigSpec defines the desired state of NutanixMachineConfig.
type NutanixMachineConfigSpec struct {
OSFamily OSFamily `json:"osFamily"`
Users []UserConfiguration `json:"users,omitempty"`
// vcpusPerSocket is the number of vCPUs per socket of the VM
// +kubebuilder:validation:Required
// +kubebuilder:validation:Minimum=1
VCPUsPerSocket int32 `json:"vcpusPerSocket"`
// vcpuSockets is the number of vCPU sockets of the VM
// +kubebuilder:validation:Required
// +kubebuilder:validation:Minimum=1
VCPUSockets int32 `json:"vcpuSockets"`
// memorySize is the memory size (in Quantity format) of the VM
// The minimum memorySize is 2Gi bytes
// +kubebuilder:validation:Required
MemorySize resource.Quantity `json:"memorySize"`
// image is to identify the OS image uploaded to the Prism Central (PC)
// The image identifier (uuid or name) can be obtained from the Prism Central console
// or using the Prism Central API.
// +kubebuilder:validation:Required
Image NutanixResourceIdentifier `json:"image"`
// cluster is to identify the cluster (the Prism Element under management
// of the Prism Central), in which the Machine's VM will be created.
// The cluster identifier (uuid or name) can be obtained from the Prism Central console
// or using the prism_central API.
// +kubebuilder:validation:Required
Cluster NutanixResourceIdentifier `json:"cluster"`
// subnet is to identify the cluster's network subnet to use for the Machine's VM
// The cluster identifier (uuid or name) can be obtained from the Prism Central console
// or using the Prism Central API.
// +kubebuilder:validation:Required
Subnet NutanixResourceIdentifier `json:"subnet"`
// Project is an optional property that specifies the Prism Central project so that machine resources
// can be linked to it. The project identifier (uuid or name) can be obtained from the Prism Central console
// or using the Prism Central API.
// +optional
Project *NutanixResourceIdentifier `json:"project,omitempty"`
// systemDiskSize is size (in Quantity format) of the system disk of the VM
// The minimum systemDiskSize is 20Gi bytes
// +kubebuilder:validation:Required
SystemDiskSize resource.Quantity `json:"systemDiskSize"`
// additionalCategories is a list of optional categories to be added to the VM.
// Categories must be created in Prism Central before they can be used.
// +kubebuilder:validation:Optional
AdditionalCategories []NutanixCategoryIdentifier `json:"additionalCategories,omitempty"`
}
// SetDefaults sets defaults to NutanixMachineConfig if user has not provided.
func (in *NutanixMachineConfig) SetDefaults() {
setNutanixMachineConfigDefaults(in)
}
// PauseReconcile pauses the reconciliation of the NutanixMachineConfig.
func (in *NutanixMachineConfig) PauseReconcile() {
in.Annotations[pausedAnnotation] = "true"
}
// IsReconcilePaused returns true if the NutanixMachineConfig is paused.
func (in *NutanixMachineConfig) IsReconcilePaused() bool {
if s, ok := in.Annotations[pausedAnnotation]; ok {
return s == "true"
}
return false
}
// SetControlPlane sets the NutanixMachineConfig as a control plane node.
func (in *NutanixMachineConfig) SetControlPlane() {
if in.Annotations == nil {
in.Annotations = map[string]string{}
}
in.Annotations[controlPlaneAnnotation] = "true"
}
// IsControlPlane returns true if the NutanixMachineConfig is a control plane node.
func (in *NutanixMachineConfig) IsControlPlane() bool {
if s, ok := in.Annotations[controlPlaneAnnotation]; ok {
return s == "true"
}
return false
}
// SetEtcd sets the NutanixMachineConfig as an etcd node.
func (in *NutanixMachineConfig) SetEtcd() {
if in.Annotations == nil {
in.Annotations = map[string]string{}
}
in.Annotations[etcdAnnotation] = "true"
}
// IsEtcd returns true if the NutanixMachineConfig is an etcd node.
func (in *NutanixMachineConfig) IsEtcd() bool {
if s, ok := in.Annotations[etcdAnnotation]; ok {
return s == "true"
}
return false
}
// SetManagedBy sets the cluster name that manages the NutanixMachineConfig.
func (in *NutanixMachineConfig) SetManagedBy(clusterName string) {
if in.Annotations == nil {
in.Annotations = map[string]string{}
}
in.Annotations[managementAnnotation] = clusterName
}
// IsManaged returns true if the NutanixMachineConfig is managed by a cluster.
func (in *NutanixMachineConfig) IsManaged() bool {
if s, ok := in.Annotations[managementAnnotation]; ok {
return s != ""
}
return false
}
// OSFamily returns the OSFamily of the NutanixMachineConfig.
func (in *NutanixMachineConfig) OSFamily() OSFamily {
return in.Spec.OSFamily
}
// GetNamespace returns the namespace of the NutanixMachineConfig.
func (in *NutanixMachineConfig) GetNamespace() string {
return in.Namespace
}
// GetName returns the name of the NutanixMachineConfig.
func (in *NutanixMachineConfig) GetName() string {
return in.Name
}
// NutanixMachineConfigStatus defines the observed state of NutanixMachineConfig.
type NutanixMachineConfigStatus struct {
// Ready is true when the provider resource is ready.
// +optional
Ready bool `json:"ready"`
// Addresses contains the Nutanix VM associated addresses.
// Address type is one of Hostname, ExternalIP, InternalIP, ExternalDNS, InternalDNS
Addresses []capiv1.MachineAddress `json:"addresses,omitempty"`
// The Nutanix VM's UUID
// +optional
VmUUID *string `json:"vmUUID,omitempty"`
// NodeRef is a reference to the corresponding workload cluster Node if it exists.
// +optional
NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"`
// Conditions defines current service state of the NutanixMachine.
// +optional
Conditions capiv1.Conditions `json:"conditions,omitempty"`
}
// NutanixMachineConfig is the Schema for the nutanix machine configs API
//
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
type NutanixMachineConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NutanixMachineConfigSpec `json:"spec,omitempty"`
Status NutanixMachineConfigStatus `json:"status,omitempty"`
}
// ConvertConfigToConfigGenerateStruct converts the NutanixMachineConfig to NutanixMachineConfigGenerate.
func (in *NutanixMachineConfig) ConvertConfigToConfigGenerateStruct() *NutanixMachineConfigGenerate {
namespace := defaultEksaNamespace
if in.Namespace != "" {
namespace = in.Namespace
}
config := &NutanixMachineConfigGenerate{
TypeMeta: in.TypeMeta,
ObjectMeta: ObjectMeta{
Name: in.Name,
Annotations: in.Annotations,
Namespace: namespace,
},
Spec: in.Spec,
}
return config
}
// Marshallable returns a Marshallable version of the NutanixMachineConfig.
func (in *NutanixMachineConfig) Marshallable() Marshallable {
return in.ConvertConfigToConfigGenerateStruct()
}
// Validate validates the NutanixMachineConfig.
func (in *NutanixMachineConfig) Validate() error {
return validateNutanixMachineConfig(in)
}
// NutanixMachineConfigGenerate is same as NutanixMachineConfig except stripped down for generation of yaml file during
// generate clusterconfig
//
// +kubebuilder:object:generate=false
type NutanixMachineConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec NutanixMachineConfigSpec `json:"spec,omitempty"`
}
// NutanixMachineConfigList contains a list of NutanixMachineConfig
//
// +kubebuilder:object:root=true
type NutanixMachineConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []NutanixMachineConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&NutanixMachineConfig{}, &NutanixMachineConfigList{})
}
| 230 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"reflect"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var nutanixmachineconfiglog = logf.Log.WithName("nutanixmachineconfig-resource")
// SetupWebhookWithManager sets up and registers the webhook with the manager.
func (in *NutanixMachineConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(in).
Complete()
}
var _ webhook.Validator = &NutanixMachineConfig{}
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-nutanixmachineconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=nutanixmachineconfigs,verbs=create;update,versions=v1alpha1,name=validation.nutanixmachineconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (in *NutanixMachineConfig) ValidateCreate() error {
nutanixmachineconfiglog.Info("validate create", "name", in.Name)
if err := in.Validate(); err != nil {
return apierrors.NewInvalid(
GroupVersion.WithKind(NutanixMachineConfigKind).GroupKind(),
in.Name,
field.ErrorList{
field.Invalid(field.NewPath("spec"), in.Spec, err.Error()),
},
)
}
return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (in *NutanixMachineConfig) ValidateUpdate(old runtime.Object) error {
nutanixmachineconfiglog.Info("validate update", "name", in.Name)
oldNutanixMachineConfig, ok := old.(*NutanixMachineConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a NutanixMachineConfig but got a %T", old))
}
var allErrs field.ErrorList
if err := in.Validate(); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), in.Spec, err.Error()))
}
if oldNutanixMachineConfig.IsReconcilePaused() {
nutanixmachineconfiglog.Info("NutanixMachineConfig is paused, so allowing update", "name", in.Name)
if len(allErrs) > 0 {
return apierrors.NewInvalid(
GroupVersion.WithKind(NutanixMachineConfigKind).GroupKind(),
in.Name,
allErrs,
)
}
return nil
}
allErrs = append(allErrs, validateImmutableFieldsNutantixMachineConfig(in, oldNutanixMachineConfig)...)
if len(allErrs) > 0 {
return apierrors.NewInvalid(
GroupVersion.WithKind(NutanixMachineConfigKind).GroupKind(),
in.Name,
allErrs,
)
}
return nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (in *NutanixMachineConfig) ValidateDelete() error {
nutanixmachineconfiglog.Info("validate delete", "name", in.Name)
// TODO(user): fill in your validation logic upon object deletion.
return nil
}
func validateImmutableFieldsNutantixMachineConfig(new, old *NutanixMachineConfig) field.ErrorList {
var allErrs field.ErrorList
specPath := field.NewPath("spec")
if new.Spec.OSFamily != old.Spec.OSFamily {
allErrs = append(allErrs, field.Forbidden(specPath.Child("OSFamily"), "field is immutable"))
}
if !reflect.DeepEqual(new.Spec.Cluster, old.Spec.Cluster) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("Cluster"), "field is immutable"))
}
if !reflect.DeepEqual(new.Spec.Subnet, old.Spec.Subnet) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("Subnet"), "field is immutable"))
}
if old.IsManaged() {
nutanixmachineconfiglog.Info("Machine config is associated with workload cluster", "name", old.Name)
return allErrs
}
if !old.IsEtcd() && !old.IsControlPlane() {
nutanixmachineconfiglog.Info("Machine config is associated with management cluster's worker nodes", "name", old.Name)
return allErrs
}
nutanixmachineconfiglog.Info("Machine config is associated with management cluster's control plane or etcd", "name", old.Name)
if err := validateImmutableFieldsControlPlane(new, old); err != nil {
allErrs = append(allErrs, err...)
}
return allErrs
}
func validateImmutableFieldsControlPlane(new, old *NutanixMachineConfig) field.ErrorList {
var allErrs field.ErrorList
specPath := field.NewPath("spec")
if !reflect.DeepEqual(new.Spec.VCPUSockets, old.Spec.VCPUSockets) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("vCPUSockets"), "field is immutable"))
}
if !reflect.DeepEqual(new.Spec.VCPUsPerSocket, old.Spec.VCPUsPerSocket) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("vCPUsPerSocket"), "field is immutable"))
}
if !reflect.DeepEqual(new.Spec.MemorySize, old.Spec.MemorySize) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("memorySize"), "field is immutable"))
}
if !reflect.DeepEqual(new.Spec.SystemDiskSize, old.Spec.SystemDiskSize) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("systemDiskSize"), "field is immutable"))
}
if !reflect.DeepEqual(new.Spec.Users, old.Spec.Users) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("users"), "field is immutable"))
}
return allErrs
}
| 145 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func nutanixMachineConfig() *v1alpha1.NutanixMachineConfig {
return &v1alpha1.NutanixMachineConfig{
ObjectMeta: v1.ObjectMeta{
Name: "test-nmc",
},
Spec: v1alpha1.NutanixMachineConfigSpec{
OSFamily: v1alpha1.Ubuntu,
VCPUsPerSocket: 2,
VCPUSockets: 4,
MemorySize: resource.MustParse("8Gi"),
Image: v1alpha1.NutanixResourceIdentifier{
Type: v1alpha1.NutanixIdentifierName,
Name: ptr.String("ubuntu-image"),
},
Cluster: v1alpha1.NutanixResourceIdentifier{
Type: v1alpha1.NutanixIdentifierName,
Name: ptr.String("cluster-1"),
},
Subnet: v1alpha1.NutanixResourceIdentifier{
Type: v1alpha1.NutanixIdentifierName,
Name: ptr.String("subnet-1"),
},
Project: &v1alpha1.NutanixResourceIdentifier{
Type: v1alpha1.NutanixIdentifierName,
Name: ptr.String("project-1"),
},
AdditionalCategories: []v1alpha1.NutanixCategoryIdentifier{
{
Key: "category-1",
Value: "value-1",
},
{
Key: "category-2",
Value: "value-2",
},
},
SystemDiskSize: resource.MustParse("100Gi"),
Users: []v1alpha1.UserConfiguration{
{
Name: "test-user",
SshAuthorizedKeys: []string{"ssh AAA..."},
},
},
},
}
}
func TestValidateCreate_Valid(t *testing.T) {
g := NewWithT(t)
config := nutanixMachineConfig()
g.Expect(config.ValidateCreate()).To(Succeed())
}
func TestValidateCreate_Invalid(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
fn func(*v1alpha1.NutanixMachineConfig)
}{
{
name: "invalid name",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Name = ""
},
},
{
name: "invalid os family",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.OSFamily = "invalid"
},
},
{
name: "invalid vcpus per socket",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.VCPUsPerSocket = 0
},
},
{
name: "invalid vcpus sockets",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.VCPUSockets = 0
},
},
{
name: "invalid memory size",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.MemorySize = resource.MustParse("0Gi")
},
},
{
name: "invalid image type",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Image.Type = "invalid"
},
},
{
name: "invalid image name",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Image.Name = nil
},
},
{
name: "invalid cluster type",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Cluster.Type = "invalid"
},
},
{
name: "invalid cluster name",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Cluster.Name = nil
},
},
{
name: "invalid subnet type",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Subnet.Type = "invalid"
},
},
{
name: "invalid subnet name",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Subnet.Name = nil
},
},
{
name: "invalid system disk size",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.SystemDiskSize = resource.MustParse("0Gi")
},
},
{
name: "no user",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Users = []v1alpha1.UserConfiguration{}
},
},
{
name: "no user name",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Users = []v1alpha1.UserConfiguration{
{
SshAuthorizedKeys: []string{"ssh AAA..."},
},
}
},
},
{
name: "no ssh authorized key",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Users = []v1alpha1.UserConfiguration{
{
Name: "eksa",
},
}
},
},
{
name: "invalid ssh authorized key",
fn: func(config *v1alpha1.NutanixMachineConfig) {
config.Spec.Users = []v1alpha1.UserConfiguration{
{
Name: "eksa",
SshAuthorizedKeys: []string{""},
},
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := nutanixMachineConfig()
tt.fn(config)
err := config.ValidateCreate()
g.Expect(err).To(HaveOccurred(), "expected error for %s", tt.name)
})
}
}
func TestNutanixMachineConfigWebhooksValidateUpdateReconcilePaused(t *testing.T) {
g := NewWithT(t)
oldConfig := nutanixMachineConfig()
newConfig := nutanixMachineConfig()
newConfig.Spec.Cluster.Name = ptr.String("new-cluster")
oldConfig.Annotations = map[string]string{
"anywhere.eks.amazonaws.com/paused": "true",
}
g.Expect(newConfig.ValidateUpdate(oldConfig)).To(Succeed())
}
func TestValidateUpdate(t *testing.T) {
g := NewWithT(t)
oldConfig := nutanixMachineConfig()
newConfig := nutanixMachineConfig()
newConfig.Spec.VCPUSockets = 8
g.Expect(newConfig.ValidateUpdate(oldConfig)).To(Succeed())
oldConfig = nutanixMachineConfig()
oldConfig.SetManagedBy("mgmt-cluster")
g.Expect(newConfig.ValidateUpdate(oldConfig)).To(Succeed())
oldConfig = nutanixMachineConfig()
oldConfig.SetControlPlane()
g.Expect(newConfig.ValidateUpdate(oldConfig)).To(HaveOccurred())
}
func TestValidateUpdate_Invalid(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
fn func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig)
}{
{
name: "different os family",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
new.Spec.OSFamily = v1alpha1.Bottlerocket
},
},
{
name: "different cluster",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
new.Spec.Cluster = v1alpha1.NutanixResourceIdentifier{
Type: v1alpha1.NutanixIdentifierName,
Name: ptr.String("cluster-2"),
}
},
},
{
name: "different subnet",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
new.Spec.Subnet = v1alpha1.NutanixResourceIdentifier{
Type: v1alpha1.NutanixIdentifierName,
Name: ptr.String("subnet-2"),
}
},
},
{
name: "old cluster is managed",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
new.Spec.OSFamily = v1alpha1.Bottlerocket
old.SetManagedBy("test")
},
},
{
name: "mismatch vcpu sockets on control plane cluster",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
old.SetControlPlane()
new.Spec.VCPUSockets++
},
},
{
name: "mismatch vcpu per socket on control plane cluster",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
old.SetControlPlane()
new.Spec.VCPUsPerSocket++
},
},
{
name: "mismatch memory size on control plane cluster",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
old.SetControlPlane()
new.Spec.MemorySize.Add(resource.MustParse("1Gi"))
},
},
{
name: "mismatch system disk size on control plane cluster",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
old.SetControlPlane()
new.Spec.SystemDiskSize.Add(resource.MustParse("1Gi"))
},
},
{
name: "mismatch users on control plane cluster",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
old.SetControlPlane()
new.Spec.Users = append(new.Spec.Users, v1alpha1.UserConfiguration{
Name: "another-user",
})
},
},
{
name: "invalid vcpus per socket",
fn: func(new *v1alpha1.NutanixMachineConfig, old *v1alpha1.NutanixMachineConfig) {
new.Spec.VCPUsPerSocket = 0
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oldConfig := nutanixMachineConfig()
newConfig := nutanixMachineConfig()
tt.fn(newConfig, oldConfig)
err := newConfig.ValidateUpdate(oldConfig)
g.Expect(err).To(HaveOccurred(), "expected error for %s", tt.name)
})
}
}
func TestValidateUpdate_OldObjectNotMachineConfig(t *testing.T) {
g := NewWithT(t)
oldConfig := nutanixDatacenterConfig()
newConfig := nutanixMachineConfig()
err := newConfig.ValidateUpdate(oldConfig)
g.Expect(err).To(HaveOccurred())
}
func TestNutanixMachineConfigSetupWebhookWithManager(t *testing.T) {
t.Setenv(features.FullLifecycleAPIEnvVar, "true")
g := NewWithT(t)
conf := nutanixMachineConfig()
g.Expect(conf.SetupWebhookWithManager(env.Manager())).To(Succeed())
}
func TestNutanixMachineConfigWebhooksValidateDelete(t *testing.T) {
g := NewWithT(t)
config := nutanixMachineConfig()
g.Expect(config.ValidateDelete()).To(Succeed())
}
| 335 |
eks-anywhere | aws | Go | package v1alpha1
// Need to override metav1.ObjectMeta as a hack due to the following issue of creationTimestamp being printed:
// https://github.com/kubernetes/kubernetes/issues/86811
// Add more fields based on https://github.com/kubernetes/apimachinery/blob/master/pkg/apis/meta/v1/types.go#L114-L288
// and https://github.com/kubernetes-sigs/cluster-api/blob/bf790fc2a53614ff5d3405c83c0de0dd3303bb1f/api/v1alpha2/common_types.go#L67-L128
// as needed.
type ObjectMeta struct {
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
}
| 13 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"net/url"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
)
const OIDCConfigKind = "OIDCConfig"
func GetAndValidateOIDCConfig(fileName string, refName string, clusterConfig *Cluster) (*OIDCConfig, error) {
config, err := getOIDCConfig(fileName)
if err != nil {
return nil, err
}
if errs := validateOIDCConfig(config); len(errs) != 0 {
return nil, apierrors.NewInvalid(GroupVersion.WithKind(OIDCConfigKind).GroupKind(), config.Name, errs)
}
if err = validateOIDCRefName(config, refName); err != nil {
return nil, err
}
if err = validateOIDCNamespace(config, clusterConfig); err != nil {
return nil, err
}
return config, nil
}
func getOIDCConfig(fileName string) (*OIDCConfig, error) {
var config OIDCConfig
err := ParseClusterConfig(fileName, &config)
if err != nil {
return nil, err
}
// If the name is empty, we can assume that they didn't configure their OIDC configuration, so return nil
if config.Name == "" {
return nil, nil
}
return &config, nil
}
func validateOIDCConfig(config *OIDCConfig) field.ErrorList {
var errs field.ErrorList
if config == nil {
return nil
}
if config.Spec.ClientId == "" {
errs = append(errs, field.Invalid(field.NewPath("spec", "clientId"), config.Spec.ClientId, "OIDCConfig clientId is required"))
}
if len(config.Spec.RequiredClaims) > 1 {
errs = append(errs, field.Invalid(field.NewPath("spec", "requiredClaims"), config.Spec.RequiredClaims, "only one OIDConfig requiredClaim is supported at this time"))
}
if config.Spec.IssuerUrl == "" {
errs = append(errs, field.Invalid(field.NewPath("spec", "issuerUrl"), config.Spec.IssuerUrl, "OIDCConfig issuerUrl is required"))
return errs
}
u, err := url.ParseRequestURI(config.Spec.IssuerUrl)
if err != nil {
errs = append(errs, field.Invalid(field.NewPath("spec", "issuerUrl"), config.Spec.IssuerUrl, fmt.Sprintf("OIDCConfig issuerUrl is invalid: %v", err)))
return errs
}
if u.Scheme != "https" {
errs = append(errs, field.Invalid(field.NewPath("spec", "issuerUrl"), config.Spec.IssuerUrl, "OIDCConfig issuerUrl should have HTTPS scheme"))
}
return errs
}
func validateOIDCRefName(config *OIDCConfig, refName string) error {
if config == nil {
return nil
}
if config.Name != refName {
return fmt.Errorf("OIDCConfig retrieved with name %v does not match name (%s) specified in "+
"identityProviderRefs", config.Name, refName)
}
return nil
}
func validateOIDCNamespace(config *OIDCConfig, clusterConfig *Cluster) error {
if config == nil {
return nil
}
if config.Namespace != clusterConfig.Namespace {
return fmt.Errorf("OIDCConfig and Cluster objects must have the same namespace specified")
}
return nil
}
| 99 |
eks-anywhere | aws | Go | package v1alpha1
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGetAndValidateOIDCConfig(t *testing.T) {
tests := []struct {
testName string
fileName string
refName string
wantOIDCConfig *OIDCConfig
wantErr bool
}{
{
testName: "file doesn't exist",
fileName: "testdata/fake_file.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
{
testName: "not parseable file",
fileName: "testdata/not_parseable_oidcconfig.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
{
testName: "refName doesn't match",
fileName: "testdata/cluster_1_19_oidc.yaml",
refName: "wrongName",
wantErr: true,
},
{
testName: "valid OIDC",
fileName: "testdata/cluster_1_19_oidc.yaml",
refName: "eksa-unit-test",
wantOIDCConfig: &OIDCConfig{
TypeMeta: metav1.TypeMeta{
Kind: "OIDCConfig",
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: OIDCConfigSpec{
ClientId: "id11",
GroupsClaim: "claim1",
GroupsPrefix: "prefix-for-groups",
IssuerUrl: "https://mydomain.com/issuer",
RequiredClaims: []OIDCConfigRequiredClaim{
{
Claim: "sub",
Value: "test",
},
},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
},
wantErr: false,
},
{
testName: "empty client id",
fileName: "testdata/cluster_invalid_oidc_null_clientid.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
{
testName: "null issuer url",
fileName: "testdata/cluster_invalid_oidc_null_issuer_url.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
{
testName: "invalid issuer url",
fileName: "testdata/cluster_invalid_oidc_bad_issuer_url.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
{
testName: "issuer url non https",
fileName: "testdata/cluster_invalid_oidc_issuer_url_non_https.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
{
testName: "extra required claims",
fileName: "testdata/cluster_oidc_extra_required_claims.yaml",
wantOIDCConfig: nil,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
c := &Cluster{}
got, err := GetAndValidateOIDCConfig(tt.fileName, tt.refName, c)
if (err != nil) != tt.wantErr {
t.Fatalf("GetAndValidateOIDCConfig() error = %v, wantErr %v", err, tt.wantErr)
}
if !reflect.DeepEqual(got, tt.wantOIDCConfig) {
t.Fatalf("GetAndValidateOIDCConfig() = %#v, want %#v", got, tt.wantOIDCConfig)
}
})
}
}
| 109 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// OIDCConfig defines an OpenID Connect (OIDCConfigSpec) identity provider configuration
// OIDCConfigSpec defines the desired state of OIDCConfig.
type OIDCConfigSpec struct {
// ClientId defines the client ID for the OpenID Connect client
ClientId string `json:"clientId,omitempty"`
// +kubebuilder:validation:Optional
// GroupsClaim defines the name of a custom OpenID Connect claim for specifying user groups
GroupsClaim string `json:"groupsClaim,omitempty"`
// +kubebuilder:validation:Optional
// GroupsPrefix defines a string to be prefixed to all groups to prevent conflicts with other authentication strategies
GroupsPrefix string `json:"groupsPrefix,omitempty"`
// IssuerUrl defines the URL of the OpenID issuer, only HTTPS scheme will be accepted
IssuerUrl string `json:"issuerUrl,omitempty"`
// +kubebuilder:validation:Optional
// RequiredClaims defines a key=value pair that describes a required claim in the ID Token
RequiredClaims []OIDCConfigRequiredClaim `json:"requiredClaims,omitempty"`
// +kubebuilder:validation:Optional
// UsernameClaim defines the OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable
UsernameClaim string `json:"usernameClaim,omitempty"`
// +kubebuilder:validation:Optional
// UsernamePrefix defines a string to prefixed to all usernames. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
UsernamePrefix string `json:"usernamePrefix,omitempty"`
}
func (e *OIDCConfigSpec) Equal(n *OIDCConfigSpec) bool {
if e == n {
return true
}
if e == nil || n == nil {
return false
}
if e.ClientId != n.ClientId {
return false
}
if e.GroupsClaim != n.GroupsClaim {
return false
}
if e.GroupsPrefix != n.GroupsPrefix {
return false
}
if e.IssuerUrl != n.IssuerUrl {
return false
}
if e.UsernameClaim != n.UsernameClaim {
return false
}
if e.UsernamePrefix != n.UsernamePrefix {
return false
}
return RequiredClaimsSliceEqual(e.RequiredClaims, n.RequiredClaims)
}
func RequiredClaimsSliceEqual(a, b []OIDCConfigRequiredClaim) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[v.Claim+v.Value]++
}
for _, v := range b {
if _, ok := m[v.Claim+v.Value]; !ok {
return false
}
m[v.Claim+v.Value] -= 1
if m[v.Claim+v.Value] == 0 {
delete(m, v.Claim+v.Value)
}
}
return len(m) == 0
}
// IsManaged returns true if the oidcconfig is associated with a workload cluster.
func (c *OIDCConfig) IsManaged() bool {
if s, ok := c.Annotations[managementAnnotation]; ok {
return s != ""
}
return false
}
func (c *OIDCConfig) SetManagedBy(clusterName string) {
if c.Annotations == nil {
c.Annotations = map[string]string{}
}
c.Annotations[managementAnnotation] = clusterName
}
type OIDCConfigRequiredClaim struct {
Claim string `json:"claim,omitempty"`
Value string `json:"value,omitempty"`
}
// OIDCConfigStatus defines the observed state of OIDCConfig.
type OIDCConfigStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// OIDCConfig is the Schema for the oidcconfigs API.
type OIDCConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec OIDCConfigSpec `json:"spec,omitempty"`
Status OIDCConfigStatus `json:"status,omitempty"`
}
// +kubebuilder:object:generate=false
// Same as OIDCConfig except stripped down for generation of yaml file while writing to github repo when flux is enabled.
type OIDCConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec OIDCConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// OIDCConfigList contains a list of OIDCConfig.
type OIDCConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []OIDCConfig `json:"items"`
}
func (c *OIDCConfig) Kind() string {
return c.TypeMeta.Kind
}
func (c *OIDCConfig) ExpectedKind() string {
return OIDCConfigKind
}
func (c *OIDCConfig) Validate() field.ErrorList {
return validateOIDCConfig(c)
}
func (c *OIDCConfig) ConvertConfigToConfigGenerateStruct() *OIDCConfigGenerate {
namespace := defaultEksaNamespace
if c.Namespace != "" {
namespace = c.Namespace
}
config := &OIDCConfigGenerate{
TypeMeta: c.TypeMeta,
ObjectMeta: ObjectMeta{
Name: c.Name,
Annotations: c.Annotations,
Namespace: namespace,
},
Spec: c.Spec,
}
return config
}
func init() {
SchemeBuilder.Register(&OIDCConfig{}, &OIDCConfigList{})
}
| 168 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var oidcconfiglog = logf.Log.WithName("oidcconfig-resource")
func (r *OIDCConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-oidcconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=oidcconfigs,verbs=create;update,versions=v1alpha1,name=validation.oidcconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &OIDCConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *OIDCConfig) ValidateCreate() error {
oidcconfiglog.Info("validate create", "name", r.Name)
allErrs := r.Validate()
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(GroupVersion.WithKind(OIDCConfigKind).GroupKind(), r.Name, allErrs)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *OIDCConfig) ValidateUpdate(old runtime.Object) error {
oidcconfiglog.Info("validate update", "name", r.Name)
oldOIDCConfig, ok := old.(*OIDCConfig)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a OIDCConfig but got a %T", old))
}
if oldOIDCConfig.IsManaged() {
clusterlog.Info("OIDC config is associated with workload cluster", "name", oldOIDCConfig.Name)
return nil
}
clusterlog.Info("OIDC config is associated with management cluster", "name", oldOIDCConfig.Name)
var allErrs field.ErrorList
allErrs = append(allErrs, validateImmutableOIDCFields(r, oldOIDCConfig)...)
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(GroupVersion.WithKind(OIDCConfigKind).GroupKind(), r.Name, allErrs)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *OIDCConfig) ValidateDelete() error {
oidcconfiglog.Info("validate delete", "name", r.Name)
return nil
}
func validateImmutableOIDCFields(new, old *OIDCConfig) field.ErrorList {
var allErrs field.ErrorList
if !new.Spec.Equal(&old.Spec) {
allErrs = append(
allErrs,
field.Forbidden(field.NewPath(OIDCConfigKind), "config is immutable"),
)
}
return allErrs
}
| 87 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestValidateCreateOIDCConfigSuccess(t *testing.T) {
c := oidcConfig()
c.Spec.ClientId = "test"
c.Spec.IssuerUrl = "https://test.com"
o := NewWithT(t)
o.Expect(c.ValidateCreate()).To(Succeed())
}
func TestClusterValidateCreateInvalidOIDCConfig(t *testing.T) {
tests := []struct {
name string
config v1alpha1.OIDCConfig
err string
}{
{
name: "No clientID",
config: v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "",
},
},
err: "clientId is required",
},
{
name: "Null issuerID",
config: v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "test",
IssuerUrl: "",
},
},
err: "issuerUrl is required",
},
{
name: "Invalid issuer url",
config: v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "test",
IssuerUrl: "invalid-url",
},
},
err: "invalid URI for request",
},
{
name: "Issuer url, non https",
config: v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "test",
IssuerUrl: "http://test.com",
},
},
err: "issuerUrl should have HTTPS scheme",
},
{
name: "Extra required claims",
config: v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "test",
IssuerUrl: "https://test.com",
RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{
{
Claim: "claim1",
Value: "val1",
},
{
Claim: "claim2",
Value: "val2",
},
},
},
},
err: "only one OIDConfig requiredClaim is supported at this time",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.config.ValidateCreate()).To(MatchError(ContainSubstring(tt.err)))
})
}
}
func TestValidateUpdateOIDCClientIdMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.ClientId = "test"
c := ocOld.DeepCopy()
c.Spec.ClientId = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCGroupsClaimMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.GroupsClaim = "test"
c := ocOld.DeepCopy()
c.Spec.GroupsClaim = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCGroupsPrefixMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.GroupsPrefix = "test"
c := ocOld.DeepCopy()
c.Spec.GroupsPrefix = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCIssuerUrlMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.IssuerUrl = "test"
c := ocOld.DeepCopy()
c.Spec.IssuerUrl = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCUsernameClaimMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.UsernameClaim = "test"
c := ocOld.DeepCopy()
c.Spec.UsernameClaim = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCUsernamePrefixMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.UsernamePrefix = "test"
c := ocOld.DeepCopy()
c.Spec.UsernamePrefix = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCRequiredClaimsMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.RequiredClaims = []v1alpha1.OIDCConfigRequiredClaim{{Claim: "test", Value: "value"}}
c := ocOld.DeepCopy()
c.Spec.RequiredClaims = []v1alpha1.OIDCConfigRequiredClaim{{Claim: "test", Value: "value2"}}
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestValidateUpdateOIDCRequiredClaimsMultipleMgmtCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.RequiredClaims = []v1alpha1.OIDCConfigRequiredClaim{{Claim: "test", Value: "value"}}
c := ocOld.DeepCopy()
c.Spec.RequiredClaims = append(c.Spec.RequiredClaims, v1alpha1.OIDCConfigRequiredClaim{
Claim: "test2",
Value: "value2",
})
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(MatchError(ContainSubstring("OIDCConfig: Forbidden: config is immutable")))
}
func TestClusterValidateUpdateOIDCclientIdMutableUpdateNameWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.ClientId = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.ClientId = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCClientIdWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.ClientId = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.ClientId = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCGroupsClaimWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.GroupsClaim = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.GroupsClaim = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCGroupsPrefixWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.GroupsPrefix = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.GroupsPrefix = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCIssuerUrlWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.IssuerUrl = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.IssuerUrl = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCUsernameClaimWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.UsernameClaim = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.UsernameClaim = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCUsernamePrefixWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.UsernamePrefix = "test"
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.UsernamePrefix = "test2"
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCRequiredClaimsWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.RequiredClaims = []v1alpha1.OIDCConfigRequiredClaim{{Claim: "test", Value: "value"}}
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.RequiredClaims = []v1alpha1.OIDCConfigRequiredClaim{{Claim: "test", Value: "value2"}}
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func TestValidateUpdateOIDCRequiredClaimsMultipleWorkloadCluster(t *testing.T) {
ocOld := oidcConfig()
ocOld.Spec.RequiredClaims = []v1alpha1.OIDCConfigRequiredClaim{{Claim: "test", Value: "value"}}
ocOld.SetManagedBy("test")
c := ocOld.DeepCopy()
c.Spec.RequiredClaims = append(c.Spec.RequiredClaims, v1alpha1.OIDCConfigRequiredClaim{
Claim: "test2",
Value: "value2",
})
o := NewWithT(t)
o.Expect(c.ValidateUpdate(&ocOld)).To(Succeed())
}
func oidcConfig() v1alpha1.OIDCConfig {
return v1alpha1.OIDCConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 1)},
Spec: v1alpha1.OIDCConfigSpec{},
Status: v1alpha1.OIDCConfigStatus{},
}
}
| 296 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const SnowDatacenterKind = "SnowDatacenterConfig"
// Used for generating yaml for generate clusterconfig command.
func NewSnowDatacenterConfigGenerate(clusterName string) *SnowDatacenterConfigGenerate {
return &SnowDatacenterConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: SnowDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: SnowDatacenterConfigSpec{},
}
}
func (s *SnowDatacenterConfigGenerate) APIVersion() string {
return s.TypeMeta.APIVersion
}
func (s *SnowDatacenterConfigGenerate) Kind() string {
return s.TypeMeta.Kind
}
func (s *SnowDatacenterConfigGenerate) Name() string {
return s.ObjectMeta.Name
}
func GetSnowDatacenterConfig(fileName string) (*SnowDatacenterConfig, error) {
var clusterConfig SnowDatacenterConfig
err := ParseClusterConfig(fileName, &clusterConfig)
if err != nil {
return nil, err
}
return &clusterConfig, nil
}
| 43 |
eks-anywhere | aws | Go | package v1alpha1
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGetSnowDatacenterConfig(t *testing.T) {
tests := []struct {
name string
fileName string
want *SnowDatacenterConfig
wantErr string
}{
{
name: "file doesn't exist",
fileName: "testdata/fake_file.yaml",
want: nil,
wantErr: "no such file or directory",
},
{
name: "not parseable file",
fileName: "testdata/not_parseable_cluster_snow.yaml",
want: nil,
wantErr: "error unmarshaling JSON: while decoding JSON: json: unknown field",
},
{
name: "valid 1.21",
fileName: "testdata/cluster_1_21_snow.yaml",
want: &SnowDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: SnowDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: SnowDatacenterConfigSpec{},
},
wantErr: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := GetSnowDatacenterConfig(tt.fileName)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
g.Expect(got).To(Equal(tt.want))
})
}
}
func TestSnowDatacenterConfigValidate(t *testing.T) {
tests := []struct {
name string
obj *SnowDatacenterConfig
wantErr string
}{
{
name: "identity empty",
obj: &SnowDatacenterConfig{
Spec: SnowDatacenterConfigSpec{},
},
wantErr: "SnowDatacenterConfig IdentityRef name must not be empty",
},
{
name: "identity kind empty",
obj: &SnowDatacenterConfig{
Spec: SnowDatacenterConfigSpec{
IdentityRef: Ref{
Name: "test",
},
},
},
wantErr: "SnowDatacenterConfig IdentityRef kind must not be empty",
},
{
name: "valid identity ref",
obj: &SnowDatacenterConfig{
Spec: SnowDatacenterConfigSpec{
IdentityRef: Ref{
Name: "creds-1",
Kind: "Secret",
},
},
},
wantErr: "",
},
{
name: "invalid identity ref kind",
obj: &SnowDatacenterConfig{
Spec: SnowDatacenterConfigSpec{
IdentityRef: Ref{
Name: "creds-1",
Kind: "UnknownKind",
},
},
},
wantErr: "SnowDatacenterConfig IdentityRef kind UnknownKind is invalid",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := tt.obj.Validate()
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
| 121 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
SnowIdentityKind = "Secret"
SnowCredentialsKey = "credentials"
SnowCertificatesKey = "ca-bundle"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// SnowDatacenterConfigSpec defines the desired state of SnowDatacenterConfig.
type SnowDatacenterConfigSpec struct { // Important: Run "make generate" to regenerate code after modifying this file
// IdentityRef is a reference to an identity for the Snow API to be used when reconciling this cluster
IdentityRef Ref `json:"identityRef,omitempty"`
}
// SnowDatacenterConfigStatus defines the observed state of SnowDatacenterConfig.
type SnowDatacenterConfigStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// SnowDatacenterConfig is the Schema for the SnowDatacenterConfigs API.
type SnowDatacenterConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SnowDatacenterConfigSpec `json:"spec,omitempty"`
Status SnowDatacenterConfigStatus `json:"status,omitempty"`
}
func (s *SnowDatacenterConfig) Kind() string {
return s.TypeMeta.Kind
}
func (s *SnowDatacenterConfig) ExpectedKind() string {
return SnowDatacenterKind
}
func (s *SnowDatacenterConfig) PauseReconcile() {
if s.Annotations == nil {
s.Annotations = map[string]string{}
}
s.Annotations[pausedAnnotation] = "true"
}
func (s *SnowDatacenterConfig) ClearPauseAnnotation() {
if s.Annotations != nil {
delete(s.Annotations, pausedAnnotation)
}
}
func (s *SnowDatacenterConfig) Validate() error {
if len(s.Spec.IdentityRef.Name) == 0 {
return fmt.Errorf("SnowDatacenterConfig IdentityRef name must not be empty")
}
if len(s.Spec.IdentityRef.Kind) == 0 {
return fmt.Errorf("SnowDatacenterConfig IdentityRef kind must not be empty")
}
if s.Spec.IdentityRef.Kind != SnowIdentityKind {
return fmt.Errorf("SnowDatacenterConfig IdentityRef kind %s is invalid, the only supported kind is %s", s.Spec.IdentityRef.Kind, SnowIdentityKind)
}
return nil
}
func (s *SnowDatacenterConfig) ConvertConfigToConfigGenerateStruct() *SnowDatacenterConfigGenerate {
namespace := defaultEksaNamespace
if s.Namespace != "" {
namespace = s.Namespace
}
config := &SnowDatacenterConfigGenerate{
TypeMeta: s.TypeMeta,
ObjectMeta: ObjectMeta{
Name: s.Name,
Annotations: s.Annotations,
Namespace: namespace,
},
Spec: s.Spec,
}
return config
}
func (s *SnowDatacenterConfig) Marshallable() Marshallable {
return s.ConvertConfigToConfigGenerateStruct()
}
// +kubebuilder:object:generate=false
// Same as SnowDatacenterConfig except stripped down for generation of yaml file during generate clusterconfig.
type SnowDatacenterConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec SnowDatacenterConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// SnowDatacenterConfigList contains a list of SnowDatacenterConfig.
type SnowDatacenterConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SnowDatacenterConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&SnowDatacenterConfig{}, &SnowDatacenterConfigList{})
}
| 119 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var snowdatacenterconfiglog = logf.Log.WithName("snowdatacenterconfig-resource")
func (r *SnowDatacenterConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-snowdatacenterconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=snowdatacenterconfigs,verbs=create;update,versions=v1alpha1,name=snowdatacenterconfig.kb.io,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &SnowDatacenterConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowDatacenterConfig) ValidateCreate() error {
snowdatacenterconfiglog.Info("validate create", "name", r.Name)
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowDatacenterConfig) ValidateUpdate(old runtime.Object) error {
snowdatacenterconfiglog.Info("validate update", "name", r.Name)
return r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowDatacenterConfig) ValidateDelete() error {
snowdatacenterconfiglog.Info("validate delete", "name", r.Name)
return nil
}
| 58 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestSnowDatacenterConfigValidateCreateValid(t *testing.T) {
g := NewWithT(t)
snowDC := snowDatacenterConfig()
snowDC.Spec.IdentityRef.Name = "refName"
snowDC.Spec.IdentityRef.Kind = v1alpha1.SnowIdentityKind
g.Expect(snowDC.ValidateCreate()).To(Succeed())
}
func TestSnowDatacenterConfigValidateCreateEmptyIdentityRef(t *testing.T) {
g := NewWithT(t)
snowDC := snowDatacenterConfig()
g.Expect(snowDC.ValidateCreate()).To(MatchError(ContainSubstring("IdentityRef name must not be empty")))
}
func TestSnowDatacenterConfigValidateCreateEmptyIdentityKind(t *testing.T) {
g := NewWithT(t)
snowDC := snowDatacenterConfig()
snowDC.Spec.IdentityRef.Name = "refName"
g.Expect(snowDC.ValidateCreate()).To(MatchError(ContainSubstring("IdentityRef kind must not be empty")))
}
func TestSnowDatacenterConfigValidateCreateIdentityKindNotSnow(t *testing.T) {
g := NewWithT(t)
snowDC := snowDatacenterConfig()
snowDC.Spec.IdentityRef.Name = "refName"
snowDC.Spec.IdentityRef.Kind = v1alpha1.OIDCConfigKind
g.Expect(snowDC.ValidateCreate()).To(MatchError(ContainSubstring("is invalid, the only supported kind is Secret")))
}
func TestSnowDatacenterConfigValidateValidateEmptyIdentityRef(t *testing.T) {
g := NewWithT(t)
snowDCOld := snowDatacenterConfig()
snowDCNew := snowDCOld.DeepCopy()
g.Expect(snowDCNew.ValidateUpdate(&snowDCOld)).To(MatchError(ContainSubstring("IdentityRef name must not be empty")))
}
func TestSnowDatacenterConfigValidateValidateEmptyIdentityKind(t *testing.T) {
g := NewWithT(t)
snowDCOld := snowDatacenterConfig()
snowDCNew := snowDCOld.DeepCopy()
snowDCNew.Spec.IdentityRef.Name = "refName"
g.Expect(snowDCNew.ValidateUpdate(&snowDCOld)).To(MatchError(ContainSubstring("IdentityRef kind must not be empty")))
}
func TestSnowDatacenterConfigValidateValidateIdentityKindNotSnow(t *testing.T) {
g := NewWithT(t)
snowDCOld := snowDatacenterConfig()
snowDCNew := snowDCOld.DeepCopy()
snowDCNew.Spec.IdentityRef.Name = "refName"
snowDCNew.Spec.IdentityRef.Kind = v1alpha1.OIDCConfigKind
g.Expect(snowDCNew.ValidateUpdate(&snowDCOld)).To(MatchError(ContainSubstring("is invalid, the only supported kind is Secret")))
}
func snowDatacenterConfig() v1alpha1.SnowDatacenterConfig {
return v1alpha1.SnowDatacenterConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 2)},
Spec: v1alpha1.SnowDatacenterConfigSpec{},
Status: v1alpha1.SnowDatacenterConfigStatus{},
}
}
| 86 |
eks-anywhere | aws | Go | package v1alpha1
import (
"bytes"
"fmt"
"net"
)
const (
// SnowIPPoolKind is the object kind name for SnowIPPool.
SnowIPPoolKind = "SnowIPPool"
)
// SnowIPPoolsSliceEqual compares and returns whether two snow IPPool objects are equal.
func SnowIPPoolsSliceEqual(a, b []IPPool) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[generateKeyForIPPool(v)]++
}
for _, v := range b {
k := generateKeyForIPPool(v)
if _, ok := m[k]; !ok {
return false
}
m[k]--
if m[k] == 0 {
delete(m, k)
}
}
return true
}
func generateKeyForIPPool(pool IPPool) string {
return fmt.Sprintf("%s%s%s%s", pool.IPStart, pool.IPEnd, pool.Subnet, pool.Gateway)
}
func validateSnowIPPool(pool *SnowIPPool) error { //nolint:gocyclo
for index, ipPool := range pool.Spec.Pools {
if len(ipPool.IPStart) == 0 {
return fmt.Errorf("SnowIPPool Pools[%d].IPStart can not be empty", index)
}
ipStart := net.ParseIP(ipPool.IPStart)
if ipStart == nil {
return fmt.Errorf("SnowIPPool Pools[%d].IPStart is invalid", index)
}
if len(ipPool.IPEnd) == 0 {
return fmt.Errorf("SnowIPPool Pools[%d].IPEnd can not be empty", index)
}
ipEnd := net.ParseIP(ipPool.IPEnd)
if ipEnd == nil {
return fmt.Errorf("SnowIPPool Pools[%d].IPEnd is invalid", index)
}
if len(ipPool.Gateway) == 0 {
return fmt.Errorf("SnowIPPool Pools[%d].Gateway can not be empty", index)
}
gateway := net.ParseIP(ipPool.Gateway)
if gateway == nil {
return fmt.Errorf("SnowIPPool Pools[%d].Gateway is invalid", index)
}
if bytes.Compare(ipStart, ipEnd) >= 0 {
return fmt.Errorf("SnowIPPool Pools[%d].IPStart should be smaller than IPEnd", index)
}
if len(ipPool.Subnet) == 0 {
return fmt.Errorf("SnowIPPool Pools[%d].Subnet can not be empty", index)
}
_, ipNet, err := net.ParseCIDR(ipPool.Subnet)
if err != nil {
return fmt.Errorf("SnowIPPool Pools[%d].Subnet is invalid: %v", index, err)
}
if !ipNet.Contains(ipStart) {
return fmt.Errorf("SnowIPPool Pools[%d].IPStart should be within the subnet range %s", index, ipPool.Subnet)
}
if !ipNet.Contains(ipEnd) {
return fmt.Errorf("SnowIPPool Pools[%d].IPEnd should be within the subnet range %s", index, ipPool.Subnet)
}
}
return nil
}
| 94 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestSnowIPPoolConvertConfigToConfigGenerateStruct(t *testing.T) {
g := NewWithT(t)
s := &v1alpha1.SnowIPPool{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.SnowIPPoolKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "ippool",
},
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "start",
IPEnd: "end",
Gateway: "gateway",
Subnet: "subnet",
},
},
},
}
want := &v1alpha1.SnowIPPoolGenerate{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.SnowIPPoolKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1alpha1.ObjectMeta{
Name: "ippool",
Namespace: "default",
},
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "start",
IPEnd: "end",
Gateway: "gateway",
Subnet: "subnet",
},
},
},
}
g.Expect(s.ConvertConfigToConfigGenerateStruct()).To(Equal(want))
}
func TestSnowIPPoolValidate(t *testing.T) {
tests := []struct {
name string
obj *v1alpha1.SnowIPPool
wantErr string
}{
{
name: "valid ip pool",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "1.2.3.5",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "",
},
{
name: "ip start empty",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPEnd: "1.2.3.5",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPStart can not be empty",
},
{
name: "ip start invalid",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "invalid",
IPEnd: "1.2.3.5",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPStart is invalid",
},
{
name: "ip end empty",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPEnd can not be empty",
},
{
name: "ip end invalid",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "invalid",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPEnd is invalid",
},
{
name: "ip gateway empty",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "1.2.3.5",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].Gateway can not be empty",
},
{
name: "ip gateway invalid",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "1.2.3.5",
Gateway: "invalid",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].Gateway is invalid",
},
{
name: "ip end smaller than ip start",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.5",
IPEnd: "1.2.3.2",
Gateway: "1.2.3.2",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPStart should be smaller than IPEnd",
},
{
name: "subnet empty",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "1.2.3.5",
Gateway: "1.2.3.1",
},
},
},
},
wantErr: "SnowIPPool Pools[0].Subnet can not be empty",
},
{
name: "subnet invalid",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "1.2.3.5",
Gateway: "1.2.3.1",
Subnet: "invalid",
},
},
},
},
wantErr: "SnowIPPool Pools[0].Subnet is invalid",
},
{
name: "ip start fell out of subnet range",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.2.2",
IPEnd: "1.2.3.5",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPStart should be within the subnet range 1.2.3.4/24",
},
{
name: "ip end fell out of subnet range",
obj: &v1alpha1.SnowIPPool{
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "1.2.3.2",
IPEnd: "1.2.4.4",
Gateway: "1.2.3.1",
Subnet: "1.2.3.4/24",
},
},
},
},
wantErr: "SnowIPPool Pools[0].IPEnd should be within the subnet range 1.2.3.4/24",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := tt.obj.Validate()
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
| 266 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SnowIPPoolSpec defines the desired state of SnowIPPool.
type SnowIPPoolSpec struct {
// IPPools defines a list of ip pool for the DNI.
Pools []IPPool `json:"pools,omitempty"`
}
// IPPool defines an ip pool with ip range, subnet and gateway.
type IPPool struct {
// IPStart is the start address of an ip range.
IPStart string `json:"ipStart"`
// IPEnd is the end address of an ip range.
IPEnd string `json:"ipEnd"`
// Subnet is used to determine whether an ip is within subnet.
Subnet string `json:"subnet"`
// Gateway is the gateway of the subnet for routing purpose.
Gateway string `json:"gateway"`
}
// SnowIPPoolStatus defines the observed state of SnowIPPool.
type SnowIPPoolStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// SnowIPPool is the Schema for the SnowIPPools API.
type SnowIPPool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SnowIPPoolSpec `json:"spec,omitempty"`
Status SnowIPPoolStatus `json:"status,omitempty"`
}
// Validate validates the fields in a SnowIPPool object.
func (s *SnowIPPool) Validate() error {
return validateSnowIPPool(s)
}
// ConvertConfigToConfigGenerateStruct converts a SnowIPPool to SnowIPPoolGenerate object.
func (s *SnowIPPool) ConvertConfigToConfigGenerateStruct() *SnowIPPoolGenerate {
namespace := defaultEksaNamespace
if s.Namespace != "" {
namespace = s.Namespace
}
config := &SnowIPPoolGenerate{
TypeMeta: s.TypeMeta,
ObjectMeta: ObjectMeta{
Name: s.Name,
Annotations: s.Annotations,
Namespace: namespace,
},
Spec: s.Spec,
}
return config
}
// +kubebuilder:object:generate=false
// SnowIPPoolGenerate is same as SnowIPPool except stripped down for generation of yaml file during generate clusterconfig.
type SnowIPPoolGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec SnowIPPoolSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// SnowIPPoolList contains a list of SnowIPPool.
type SnowIPPoolList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SnowIPPool `json:"items"`
}
func init() {
SchemeBuilder.Register(&SnowIPPool{}, &SnowIPPoolList{})
}
| 89 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var snowippoollog = logf.Log.WithName("snowippool-resource")
// SetupWebhookWithManager sets up the webhook manager for SnowIPPool.
func (r *SnowIPPool) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-snowippool,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=snowippools,verbs=create;update,versions=v1alpha1,name=validation.snowippool.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &SnowIPPool{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowIPPool) ValidateCreate() error {
snowippoollog.Info("validate create", "name", r.Name)
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowIPPool) ValidateUpdate(old runtime.Object) error {
snowippoollog.Info("validate update", "name", r.Name)
oldPool, ok := old.(*SnowIPPool)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a SnowIPPool but got a %T", old))
}
if allErrs := validateImmutableFieldsSnowIPPool(r, oldPool); len(allErrs) != 0 {
return apierrors.NewInvalid(GroupVersion.WithKind(SnowIPPoolKind).GroupKind(), r.Name, allErrs)
}
return r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowIPPool) ValidateDelete() error {
snowippoollog.Info("validate delete", "name", r.Name)
return nil
}
func validateImmutableFieldsSnowIPPool(new, old *SnowIPPool) field.ErrorList {
var allErrs field.ErrorList
if !SnowIPPoolsSliceEqual(new.Spec.Pools, old.Spec.Pools) {
allErrs = append(
allErrs,
field.Forbidden(field.NewPath("spec").Child("pools"), "field is immutable"),
)
}
return allErrs
}
| 69 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestSnowIPPoolValidateCreate(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
g.Expect(new.ValidateCreate()).To(Succeed())
}
func TestSnowIPPoolValidateCreateInvalidIPPool(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
new.Spec.Pools[0].IPStart = "invalid"
g.Expect(new.ValidateCreate()).To(MatchError(ContainSubstring("SnowIPPool Pools[0].IPStart is invalid")))
}
func TestSnowIPPoolValidateUpdate(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
old := new.DeepCopy()
g.Expect(new.ValidateUpdate(old)).To(Succeed())
}
func TestSnowIPPoolValidateUpdateInvalidIPPool(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
new.Spec.Pools[0].IPStart = "invalid"
old := new.DeepCopy()
g.Expect(new.ValidateUpdate(old)).To(MatchError(ContainSubstring("SnowIPPool Pools[0].IPStart is invalid")))
}
func TestSnowIPPoolValidateUpdateInvalidObjectType(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
old := &v1alpha1.SnowDatacenterConfig{}
g.Expect(new.ValidateUpdate(old)).To(MatchError(ContainSubstring("expected a SnowIPPool but got a *v1alpha1.SnowDatacenterConfig")))
}
func TestSnowIPPoolValidateUpdateIPPoolsSame(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
old := new.DeepCopy()
new.Spec.Pools = []v1alpha1.IPPool{
{
IPStart: "192.168.1.20",
IPEnd: "192.168.1.30",
Gateway: "192.168.1.1",
Subnet: "192.168.1.0/24",
},
{
IPStart: "192.168.1.2",
IPEnd: "192.168.1.14",
Gateway: "192.168.1.1",
Subnet: "192.168.1.0/24",
},
}
g.Expect(new.ValidateUpdate(old)).To(Succeed())
}
func TestSnowIPPoolValidateUpdateIPPoolsLengthDiff(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
old := new.DeepCopy()
old.Spec.Pools = []v1alpha1.IPPool{
{
IPStart: "start",
},
}
g.Expect(new.ValidateUpdate(old)).To(MatchError(ContainSubstring("spec.pools: Forbidden: field is immutable")))
}
func TestSnowIPPoolValidateUpdateIPPoolsDiff(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
old := new.DeepCopy()
new.Spec.Pools = []v1alpha1.IPPool{
{
IPStart: "192.168.1.21",
IPEnd: "192.168.1.30",
Gateway: "192.168.1.1",
Subnet: "192.168.1.0/24",
},
{
IPStart: "192.168.1.2",
IPEnd: "192.168.1.14",
Gateway: "192.168.1.1",
Subnet: "192.168.1.0/24",
},
}
g.Expect(new.ValidateUpdate(old)).To(MatchError(ContainSubstring("spec.pools: Forbidden: field is immutable")))
}
func TestSnowIPPoolValidateDelete(t *testing.T) {
g := NewWithT(t)
new := snowIPPool()
g.Expect(new.ValidateDelete()).To(Succeed())
}
func snowIPPool() v1alpha1.SnowIPPool {
return v1alpha1.SnowIPPool{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "192.168.1.2",
IPEnd: "192.168.1.14",
Gateway: "192.168.1.1",
Subnet: "192.168.1.0/24",
},
{
IPStart: "192.168.1.20",
IPEnd: "192.168.1.30",
Gateway: "192.168.1.1",
Subnet: "192.168.1.0/24",
},
},
},
Status: v1alpha1.SnowIPPoolStatus{},
}
}
| 130 |
eks-anywhere | aws | Go | package v1alpha1
import (
"errors"
"fmt"
"regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/logger"
snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)
const (
SnowMachineConfigKind = "SnowMachineConfig"
DefaultSnowSSHKeyName = ""
DefaultSnowInstanceType = "sbe-c.large"
DefaultSnowPhysicalNetworkConnectorType = SFPPlus
DefaultOSFamily = Ubuntu
MinimumContainerVolumeSizeUbuntu = 8
MinimumContainerVolumeSizeBottlerocket = 25
MinimumNonRootVolumeSize = 8
)
var snowInstanceTypesRegex = regexp.MustCompile(`^sbe-[cg]\.\d*x?large$`)
// NewSnowMachineConfigGenerate generates snowMachineConfig example for generate clusterconfig command.
func NewSnowMachineConfigGenerate(name string) *SnowMachineConfigGenerate {
return &SnowMachineConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: SnowMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: name,
},
Spec: SnowMachineConfigSpec{
AMIID: "",
Devices: []string{""},
InstanceType: DefaultSnowInstanceType,
SshKeyName: DefaultSnowSSHKeyName,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
OSFamily: DefaultOSFamily,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
},
}
}
func (s *SnowMachineConfigGenerate) APIVersion() string {
return s.TypeMeta.APIVersion
}
func (s *SnowMachineConfigGenerate) Kind() string {
return s.TypeMeta.Kind
}
func (s *SnowMachineConfigGenerate) Name() string {
return s.ObjectMeta.Name
}
func validateSnowMachineConfig(config *SnowMachineConfig) error {
if err := validateSnowMachineConfigInstanceType(string(config.Spec.InstanceType)); err != nil {
return err
}
if config.Spec.PhysicalNetworkConnector != SFPPlus && config.Spec.PhysicalNetworkConnector != QSFP && config.Spec.PhysicalNetworkConnector != RJ45 {
return fmt.Errorf("SnowMachineConfig PhysicalNetworkConnector %s is not supported, please use one of the following: %s, %s, %s ", config.Spec.PhysicalNetworkConnector, SFPPlus, QSFP, RJ45)
}
if len(config.Spec.Devices) == 0 {
return errors.New("SnowMachineConfig Devices must contain at least one device IP")
}
if len(config.Spec.OSFamily) <= 0 {
return errors.New("SnowMachineConfig OSFamily must be specified")
}
if config.Spec.OSFamily != Bottlerocket && config.Spec.OSFamily != Ubuntu {
return fmt.Errorf("SnowMachineConfig OSFamily %s is not supported, please use one of the following: %s, %s", config.Spec.OSFamily, Bottlerocket, Ubuntu)
}
if err := validateSnowMachineConfigNetwork(config.Spec.Network); err != nil {
return err
}
if err := validateSnowMachineConfigContainerVolume(config); err != nil {
return err
}
if err := validateHostOSConfig(config.Spec.HostOSConfiguration, config.Spec.OSFamily); err != nil {
return fmt.Errorf("SnowMachineConfig HostOSConfiguration is invalid: %v", err)
}
return validateSnowMachineConfigNonRootVolumes(config.Spec.NonRootVolumes)
}
func validateSnowMachineConfigInstanceType(instanceType string) error {
match := snowInstanceTypesRegex.FindStringSubmatch(instanceType)
if match == nil {
return fmt.Errorf("SnowMachineConfig InstanceType %s is not supported", instanceType)
}
return nil
}
func validateSnowMachineConfigContainerVolume(config *SnowMachineConfig) error {
// The Bottlerocket AWS Variant AMI only has 2 Gi of data volume, which is insufficient to store EKS-A and user container volumes.
// Thus the ContainersVolume is required and its size must be no smaller than 25 Gi.
if config.Spec.OSFamily == Bottlerocket {
if config.Spec.ContainersVolume == nil {
return errors.New("SnowMachineConfig ContainersVolume must be specified for Bottlerocket OS")
}
if config.Spec.ContainersVolume.Size < MinimumContainerVolumeSizeBottlerocket {
return fmt.Errorf("SnowMachineConfig ContainersVolume.Size must be no smaller than %d Gi for Bottlerocket OS", MinimumContainerVolumeSizeBottlerocket)
}
}
if config.Spec.OSFamily == Ubuntu && config.Spec.ContainersVolume != nil && config.Spec.ContainersVolume.Size < MinimumContainerVolumeSizeUbuntu {
return fmt.Errorf("SnowMachineConfig ContainersVolume.Size must be no smaller than %d Gi for Ubuntu OS", MinimumContainerVolumeSizeUbuntu)
}
return nil
}
func validateSnowMachineConfigNonRootVolumes(volumes []*snowv1.Volume) error {
for i, v := range volumes {
if v == nil {
continue
}
if len(v.DeviceName) <= 0 {
return fmt.Errorf("SnowMachineConfig NonRootVolumes[%d].DeviceName must be specified", i)
}
if strings.HasPrefix(v.DeviceName, "/dev/sda") {
return fmt.Errorf("SnowMachineConfig NonRootVolumes[%d].DeviceName [%s] is invalid. Device name with prefix /dev/sda* is reserved for root volume and containers volume, please use another name", i, v.DeviceName)
}
if v.Size < MinimumNonRootVolumeSize {
return fmt.Errorf("SnowMachineConfig NonRootVolumes[%d].Size must be no smaller than %d Gi", i, MinimumNonRootVolumeSize)
}
}
return nil
}
func validateSnowMachineConfigNetwork(network SnowNetwork) error {
if len(network.DirectNetworkInterfaces) <= 0 {
return errors.New("SnowMachineConfig Network.DirectNetworkInterfaces length must be no smaller than 1")
}
primaryDNICount := 0
for _, dni := range network.DirectNetworkInterfaces {
if dni.Primary {
primaryDNICount++
}
}
if primaryDNICount != 1 {
return errors.New("SnowMachineConfig Network.DirectNetworkInterfaces list must contain one and only one primary DNI")
}
return nil
}
func setSnowMachineConfigDefaults(config *SnowMachineConfig) {
if config.Spec.InstanceType == "" {
config.Spec.InstanceType = DefaultSnowInstanceType
logger.V(1).Info("SnowMachineConfig InstanceType is empty. Using default", "default instance type", DefaultSnowInstanceType)
}
if config.Spec.PhysicalNetworkConnector == "" {
config.Spec.PhysicalNetworkConnector = DefaultSnowPhysicalNetworkConnectorType
logger.V(1).Info("SnowMachineConfig PhysicalNetworkConnector is empty. Using default", "default physical network connector", DefaultSnowPhysicalNetworkConnectorType)
}
}
| 184 |
eks-anywhere | aws | Go | package v1alpha1
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)
func TestSnowMachineConfigSetDefaults(t *testing.T) {
tests := []struct {
name string
before *SnowMachineConfig
after *SnowMachineConfig
}{
{
name: "optional fields all empty",
before: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{},
},
after: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
},
},
},
{
name: "instance type exists",
before: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
InstanceType: "instance-type-1",
},
},
after: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
InstanceType: "instance-type-1",
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
},
},
},
{
name: "ssh key name exists",
before: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
SshKeyName: "ssh-name",
},
},
after: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
SshKeyName: "ssh-name",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
},
},
},
{
name: "physical network exists",
before: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
PhysicalNetworkConnector: "network-1",
},
},
after: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
PhysicalNetworkConnector: "network-1",
InstanceType: DefaultSnowInstanceType,
},
},
},
{
name: "os family exists",
before: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
OSFamily: "ubuntu",
},
},
after: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
OSFamily: Ubuntu,
},
},
},
{
name: "HostOSConfiguration exists",
before: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
OSFamily: Bottlerocket,
HostOSConfiguration: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kernel: &v1beta1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
},
},
},
},
},
},
after: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
OSFamily: Bottlerocket,
HostOSConfiguration: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kernel: &v1beta1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{"foo": "bar"},
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
tt.before.SetDefaults()
g.Expect(tt.before).To(Equal(tt.after))
})
}
}
func TestSnowMachineConfigValidate(t *testing.T) {
tests := []struct {
name string
obj *SnowMachineConfig
wantErr string
}{
{
name: "valid config with amiID, instance type, physical network interface, devices, network, container volume, osFamily",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "",
},
{
name: "valid without ami",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
InstanceType: "sbe-c.4xlarge",
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Ubuntu,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "",
},
{
name: "invalid instance type sbe-g.largex",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: "sbe-g.largex",
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "SnowMachineConfig InstanceType sbe-g.largex is not supported",
},
{
name: "invalid instance type sbe-c-xlarge",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: "sbe-c-large",
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "SnowMachineConfig InstanceType sbe-c-large is not supported",
},
{
name: "invalid instance type sbe-c.elarge",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: "sbe-c.elarge",
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "SnowMachineConfig InstanceType sbe-c.elarge is not supported",
},
{
name: "invalid physical network connector",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: "sbe-c.64xlarge",
PhysicalNetworkConnector: "invalid-physical-network",
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "PhysicalNetworkConnector invalid-physical-network is not supported",
},
{
name: "empty devices",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: "sbe-g.large",
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "Devices must contain at least one device IP",
},
{
name: "invalid container volume size for ubuntu",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Ubuntu,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 7,
},
},
},
wantErr: "SnowMachineConfig ContainersVolume.Size must be no smaller than 8 Gi",
},
{
name: "invalid container volume size for bottlerocket",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 24,
},
},
},
wantErr: "SnowMachineConfig ContainersVolume.Size must be no smaller than 25 Gi",
},
{
name: "container volume not specified for bottlerocket",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
},
},
wantErr: "SnowMachineConfig ContainersVolume must be specified for Bottlerocket OS",
},
{
name: "invalid os family",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: "invalidOS",
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "SnowMachineConfig OSFamily invalidOS is not supported",
},
{
name: "empty os family",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: "",
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "SnowMachineConfig OSFamily must be specified",
},
{
name: "empty network",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
ContainersVolume: &snowv1.Volume{
Size: 25,
},
},
},
wantErr: "SnowMachineConfig Network.DirectNetworkInterfaces length must be no smaller than 1",
},
{
name: "invalid network",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
ContainersVolume: &snowv1.Volume{
Size: 25,
},
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: false,
},
},
},
},
},
wantErr: "SnowMachineConfig Network.DirectNetworkInterfaces list must contain one and only one primary DNI",
},
{
name: "invalid nonRootVolumes, device name empty",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
NonRootVolumes: []*snowv1.Volume{
{
DeviceName: "",
Size: 25,
},
},
},
},
wantErr: "SnowMachineConfig NonRootVolumes[0].DeviceName must be specified",
},
{
name: "invalid nonRootVolumes, device name prefix /dev/sda",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
NonRootVolumes: []*snowv1.Volume{
{
DeviceName: "/dev/sda1",
Size: 25,
},
},
},
},
wantErr: "SnowMachineConfig NonRootVolumes[0].DeviceName [/dev/sda1] is invalid. Device name with prefix /dev/sda* is reserved for root volume and containers volume, please use another name",
},
{
name: "invalid nonRootVolumes, size smaller that 8Gi",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Bottlerocket,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
NonRootVolumes: []*snowv1.Volume{
{
DeviceName: "/dev/sdc",
Size: 7,
},
},
},
},
wantErr: "SnowMachineConfig NonRootVolumes[0].Size must be no smaller than 8 Gi",
},
{
name: "invalid HostOSConfiguration",
obj: &SnowMachineConfig{
Spec: SnowMachineConfigSpec{
AMIID: "ami-1",
InstanceType: DefaultSnowInstanceType,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
Devices: []string{"1.2.3.4"},
OSFamily: Ubuntu,
HostOSConfiguration: &HostOSConfiguration{
BottlerocketConfiguration: &BottlerocketConfiguration{
Kernel: &v1beta1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{"foo": "bar"},
},
},
},
ContainersVolume: &snowv1.Volume{
Size: 25,
},
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
},
},
wantErr: "SnowMachineConfig HostOSConfiguration is invalid: BottlerocketConfiguration can only be used with osFamily: \"bottlerocket\"",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := tt.obj.Validate()
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestSnowMachineConfigSetControlPlaneAnnotation(t *testing.T) {
g := NewWithT(t)
m := &SnowMachineConfig{}
m.SetControlPlaneAnnotation()
g.Expect(m.Annotations).To(Equal(map[string]string{"anywhere.eks.amazonaws.com/control-plane": "true"}))
}
func TestSnowMachineConfigSetEtcdAnnotation(t *testing.T) {
g := NewWithT(t)
m := &SnowMachineConfig{}
m.SetEtcdAnnotation()
g.Expect(m.Annotations).To(Equal(map[string]string{"anywhere.eks.amazonaws.com/etcd": "true"}))
}
func TestNewSnowMachineConfigGenerate(t *testing.T) {
g := NewWithT(t)
want := &SnowMachineConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: SnowMachineConfigKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: "snow-cluster",
},
Spec: SnowMachineConfigSpec{
AMIID: "",
Devices: []string{""},
InstanceType: DefaultSnowInstanceType,
SshKeyName: DefaultSnowSSHKeyName,
PhysicalNetworkConnector: DefaultSnowPhysicalNetworkConnectorType,
OSFamily: DefaultOSFamily,
Network: SnowNetwork{
DirectNetworkInterfaces: []SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
},
}
g.Expect(NewSnowMachineConfigGenerate("snow-cluster")).To(Equal(want))
}
| 655 |
eks-anywhere | aws | Go | package v1alpha1
import (
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)
const (
SFPPlus PhysicalNetworkConnectorType = "SFP_PLUS"
QSFP PhysicalNetworkConnectorType = "QSFP"
RJ45 PhysicalNetworkConnectorType = "RJ45"
)
type PhysicalNetworkConnectorType string
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Important: Run "make generate" to regenerate code after modifying this file
// SnowMachineConfigSpec defines the desired state of SnowMachineConfigSpec.
type SnowMachineConfigSpec struct {
// Important: Run "make generate" to regenerate code after modifying this file
// The AMI ID from which to create the machine instance.
AMIID string `json:"amiID,omitempty"`
// InstanceType is the type of instance to create.
InstanceType string `json:"instanceType,omitempty"`
// PhysicalNetworkConnector is the physical network connector type to use for creating direct network interfaces (DNI).
// Valid values: "SFP_PLUS" (default), "QSFP" and "RJ45".
PhysicalNetworkConnector PhysicalNetworkConnectorType `json:"physicalNetworkConnector,omitempty"`
// SSHKeyName is the name of the ssh key defined in the aws snow key pairs, to attach to the instance.
SshKeyName string `json:"sshKeyName,omitempty"`
// Devices contains a device ip list assigned by the user to provision machines.
Devices []string `json:"devices,omitempty"`
// ContainersVolume provides the configuration options for the containers data storage volume.
ContainersVolume *snowv1.Volume `json:"containersVolume,omitempty"`
// NonRootVolumes provides the configuration options for the non root storage volumes.
NonRootVolumes []*snowv1.Volume `json:"nonRootVolumes,omitempty"`
// OSFamily is the node instance OS.
// Valid values: "bottlerocket" and "ubuntu".
OSFamily OSFamily `json:"osFamily,omitempty"`
// Network provides the custom network setting for the machine.
Network SnowNetwork `json:"network"`
// HostOSConfiguration provides OS specific configurations for the machine
HostOSConfiguration *HostOSConfiguration `json:"hostOSConfiguration,omitempty"`
}
// SnowNetwork specifies the network configurations for snow.
type SnowNetwork struct {
// DirectNetworkInterfaces contains a list of direct network interface (DNI) configuration.
// +kubebuilder:validation:MinItems=1
// +kubebuilder:validation:MaxItems=8
DirectNetworkInterfaces []SnowDirectNetworkInterface `json:"directNetworkInterfaces,omitempty"`
}
// SnowDirectNetworkInterface defines a direct network interface (DNI) configuration.
type SnowDirectNetworkInterface struct {
// Index is the index number of DNI used to clarify the position in the list. Usually starts with 1.
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=8
// +optional
Index int `json:"index,omitempty"`
// VlanID is the vlan id assigned by the user for the DNI.
// +kubebuilder:validation:Minimum=0
// +kubebuilder:validation:Maximum=4095
// +optional
VlanID *int32 `json:"vlanID,omitempty"`
// DHCP defines whether DHCP is used to assign ip for the DNI.
// +optional
DHCP bool `json:"dhcp,omitempty"`
// IPPool contains a reference to a snow ip pool which provides a range of ip addresses.
// When specified, an ip address selected from the pool is allocated to this DNI.
// +optional
IPPoolRef *Ref `json:"ipPoolRef,omitempty"`
// Primary indicates whether the DNI is primary or not.
// +optional
Primary bool `json:"primary,omitempty"`
}
func (s *SnowMachineConfig) SetManagedBy(clusterName string) {
if s.Annotations == nil {
s.Annotations = map[string]string{}
}
s.Annotations[managementAnnotation] = clusterName
}
func (s *SnowMachineConfig) OSFamily() OSFamily {
return s.Spec.OSFamily
}
// SnowMachineConfigStatus defines the observed state of SnowMachineConfig.
type SnowMachineConfigStatus struct {
// SpecValid is set to true if vspheredatacenterconfig is validated.
SpecValid bool `json:"specValid,omitempty"`
// FailureMessage indicates that there is a fatal problem reconciling the
// state, and will be set to a descriptive error message.
// +optional
FailureMessage *string `json:"failureMessage,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// SnowMachineConfig is the Schema for the SnowMachineConfigs API.
type SnowMachineConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SnowMachineConfigSpec `json:"spec,omitempty"`
Status SnowMachineConfigStatus `json:"status,omitempty"`
}
func (s *SnowMachineConfig) SetDefaults() {
setSnowMachineConfigDefaults(s)
}
func (s *SnowMachineConfig) Validate() error {
return validateSnowMachineConfig(s)
}
// ValidateHasSSHKeyName verifies a SnowMachineConfig object must have a SshKeyName.
// This validation only runs in SnowMachineConfig validation webhook, as we support
// auto-generate and import ssh key when creating a cluster via CLI.
func (s *SnowMachineConfig) ValidateHasSSHKeyName() error {
if len(s.Spec.SshKeyName) <= 0 {
return errors.New("SnowMachineConfig SshKeyName must not be empty")
}
return nil
}
func (s *SnowMachineConfig) SetControlPlaneAnnotation() {
if s.Annotations == nil {
s.Annotations = map[string]string{}
}
s.Annotations[controlPlaneAnnotation] = "true"
}
func (s *SnowMachineConfig) SetEtcdAnnotation() {
if s.Annotations == nil {
s.Annotations = map[string]string{}
}
s.Annotations[etcdAnnotation] = "true"
}
// IPPoolRefs returns a slice of snow IP pools that belongs to a snowMachineConfig.
func (s *SnowMachineConfig) IPPoolRefs() []Ref {
ipPoolRefMap := make(refSet, 1)
for _, dni := range s.Spec.Network.DirectNetworkInterfaces {
ipPoolRefMap.addIfNotNil(dni.IPPoolRef)
}
return ipPoolRefMap.toSlice()
}
// +kubebuilder:object:generate=false
// Same as SnowMachineConfig except stripped down for generation of yaml file during generate clusterconfig.
type SnowMachineConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec SnowMachineConfigSpec `json:"spec,omitempty"`
}
func (s *SnowMachineConfig) ConvertConfigToConfigGenerateStruct() *SnowMachineConfigGenerate {
namespace := defaultEksaNamespace
if s.Namespace != "" {
namespace = s.Namespace
}
config := &SnowMachineConfigGenerate{
TypeMeta: s.TypeMeta,
ObjectMeta: ObjectMeta{
Name: s.Name,
Annotations: s.Annotations,
Namespace: namespace,
},
Spec: s.Spec,
}
return config
}
func (s *SnowMachineConfig) Marshallable() Marshallable {
return s.ConvertConfigToConfigGenerateStruct()
}
//+kubebuilder:object:root=true
// SnowMachineConfigList contains a list of SnowMachineConfig.
type SnowMachineConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SnowMachineConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&SnowMachineConfig{}, &SnowMachineConfigList{})
}
| 217 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestSnowMachineConfigIPPoolRefs(t *testing.T) {
m := &v1alpha1.SnowMachineConfig{
Spec: v1alpha1.SnowMachineConfigSpec{
Network: v1alpha1.SnowNetwork{
DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
{
IPPoolRef: &v1alpha1.Ref{
Kind: v1alpha1.SnowIPPoolKind,
Name: "ip-pool-1",
},
},
{
IPPoolRef: &v1alpha1.Ref{
Kind: v1alpha1.SnowIPPoolKind,
Name: "ip-pool-2",
},
},
{
IPPoolRef: &v1alpha1.Ref{
Kind: v1alpha1.SnowIPPoolKind,
Name: "ip-pool-1", // test duplicates
},
},
},
},
},
}
want := []v1alpha1.Ref{
{
Kind: v1alpha1.SnowIPPoolKind,
Name: "ip-pool-1",
},
{
Kind: v1alpha1.SnowIPPoolKind,
Name: "ip-pool-2",
},
}
got := m.IPPoolRefs()
if !v1alpha1.RefSliceEqual(got, want) {
t.Fatalf("Expected %v, got %v", want, got)
}
}
| 54 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var snowmachineconfiglog = logf.Log.WithName("snowmachineconfig-resource")
func (r *SnowMachineConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
//+kubebuilder:webhook:path=/mutate-anywhere-eks-amazonaws-com-v1alpha1-snowmachineconfig,mutating=true,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=snowmachineconfigs,verbs=create;update,versions=v1alpha1,name=mutation.snowmachineconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Defaulter = &SnowMachineConfig{}
// Default implements webhook.Defaulter so a webhook will be registered for the type.
func (r *SnowMachineConfig) Default() {
snowmachineconfiglog.Info("Setting up Snow Machine Config defaults for", "name", r.Name)
r.SetDefaults()
}
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-anywhere-eks-amazonaws-com-v1alpha1-snowmachineconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=anywhere.eks.amazonaws.com,resources=snowmachineconfigs,verbs=create;update,versions=v1alpha1,name=validation.snowmachineconfig.anywhere.amazonaws.com,admissionReviewVersions={v1,v1beta1}
var _ webhook.Validator = &SnowMachineConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowMachineConfig) ValidateCreate() error {
snowmachineconfiglog.Info("validate create", "name", r.Name)
if err := r.ValidateHasSSHKeyName(); err != nil {
return err
}
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowMachineConfig) ValidateUpdate(old runtime.Object) error {
snowmachineconfiglog.Info("validate update", "name", r.Name)
if err := r.ValidateHasSSHKeyName(); err != nil {
return err
}
return r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (r *SnowMachineConfig) ValidateDelete() error {
snowmachineconfiglog.Info("validate delete", "name", r.Name)
// TODO(user): fill in your validation logic upon object deletion.
return nil
}
| 77 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)
func TestSnowMachineConfigSetDefaults(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sOld.Default()
g.Expect(sOld.Spec.InstanceType).To(Equal(v1alpha1.DefaultSnowInstanceType))
g.Expect(sOld.Spec.PhysicalNetworkConnector).To(Equal(v1alpha1.DefaultSnowPhysicalNetworkConnectorType))
}
func TestSnowMachineConfigValidateCreateNoAMI(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sOld.Spec.SshKeyName = "testKey"
sOld.Spec.InstanceType = v1alpha1.DefaultSnowInstanceType
sOld.Spec.PhysicalNetworkConnector = v1alpha1.SFPPlus
sOld.Spec.Devices = []string{"1.2.3.4"}
sOld.Spec.OSFamily = v1alpha1.Bottlerocket
sOld.Spec.ContainersVolume = &snowv1.Volume{
Size: 25,
}
sOld.Spec.Network = v1alpha1.SnowNetwork{
DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
}
g.Expect(sOld.ValidateCreate()).To(Succeed())
}
func TestSnowMachineConfigValidateCreateInvalidInstanceType(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sOld.Spec.SshKeyName = "testKey"
sOld.Spec.InstanceType = "invalid-instance-type"
g.Expect(sOld.ValidateCreate()).To(MatchError(ContainSubstring("SnowMachineConfig InstanceType invalid-instance-type is not supported")))
}
func TestSnowMachineConfigValidateCreateEmptySSHKeyName(t *testing.T) {
g := NewWithT(t)
s := snowMachineConfig()
s.Spec.InstanceType = v1alpha1.DefaultSnowInstanceType
s.Spec.PhysicalNetworkConnector = v1alpha1.SFPPlus
s.Spec.Devices = []string{"1.2.3.4"}
s.Spec.OSFamily = v1alpha1.Ubuntu
s.Spec.Network = v1alpha1.SnowNetwork{
DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
}
g.Expect(s.ValidateCreate()).To(MatchError(ContainSubstring("SnowMachineConfig SshKeyName must not be empty")))
}
func TestSnowMachineConfigValidateCreate(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sOld.Spec.AMIID = "testAMI"
sOld.Spec.SshKeyName = "testKey"
sOld.Spec.InstanceType = v1alpha1.DefaultSnowInstanceType
sOld.Spec.PhysicalNetworkConnector = v1alpha1.SFPPlus
sOld.Spec.Devices = []string{"1.2.3.4"}
sOld.Spec.OSFamily = v1alpha1.Bottlerocket
sOld.Spec.ContainersVolume = &snowv1.Volume{
Size: 25,
}
sOld.Spec.Network = v1alpha1.SnowNetwork{
DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
}
g.Expect(sOld.ValidateCreate()).To(Succeed())
}
func TestSnowMachineConfigValidateUpdate(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sNew := sOld.DeepCopy()
sNew.Spec.AMIID = "testAMI"
sNew.Spec.SshKeyName = "testKey"
sNew.Spec.InstanceType = v1alpha1.DefaultSnowInstanceType
sNew.Spec.PhysicalNetworkConnector = v1alpha1.SFPPlus
sNew.Spec.Devices = []string{"1.2.3.4"}
sNew.Spec.OSFamily = v1alpha1.Bottlerocket
sNew.Spec.ContainersVolume = &snowv1.Volume{
Size: 25,
}
sNew.Spec.Network = v1alpha1.SnowNetwork{
DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
}
g.Expect(sNew.ValidateUpdate(&sOld)).To(Succeed())
}
func TestSnowMachineConfigValidateUpdateNoDevices(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sNew := sOld.DeepCopy()
sNew.Spec.AMIID = "testAMI"
sNew.Spec.SshKeyName = "testKey"
sNew.Spec.InstanceType = v1alpha1.DefaultSnowInstanceType
sNew.Spec.PhysicalNetworkConnector = v1alpha1.SFPPlus
sNew.Spec.OSFamily = v1alpha1.Bottlerocket
g.Expect(sNew.ValidateUpdate(&sOld)).To(MatchError(ContainSubstring("Devices must contain at least one device IP")))
}
func TestSnowMachineConfigValidateUpdateEmptySSHKeyName(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
sNew := sOld.DeepCopy()
sNew.Spec.AMIID = "testAMI"
sNew.Spec.InstanceType = v1alpha1.DefaultSnowInstanceType
sNew.Spec.PhysicalNetworkConnector = v1alpha1.SFPPlus
sNew.Spec.OSFamily = v1alpha1.Bottlerocket
g.Expect(sNew.ValidateUpdate(&sOld)).To(MatchError(ContainSubstring("SnowMachineConfig SshKeyName must not be empty")))
}
// Unit test to pass the code coverage job.
func TestSnowMachineConfigValidateDelete(t *testing.T) {
g := NewWithT(t)
sOld := snowMachineConfig()
g.Expect(sOld.ValidateDelete()).To(Succeed())
}
func snowMachineConfig() v1alpha1.SnowMachineConfig {
return v1alpha1.SnowMachineConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Annotations: make(map[string]string, 2)},
Spec: v1alpha1.SnowMachineConfigSpec{},
Status: v1alpha1.SnowMachineConfigStatus{},
}
}
| 172 |
eks-anywhere | aws | Go | package v1alpha1
import (
"errors"
"fmt"
"net/url"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
const TinkerbellDatacenterKind = "TinkerbellDatacenterConfig"
// Used for generating yaml for generate clusterconfig command.
func NewTinkerbellDatacenterConfigGenerate(clusterName string) *TinkerbellDatacenterConfigGenerate {
return &TinkerbellDatacenterConfigGenerate{
TypeMeta: metav1.TypeMeta{
Kind: TinkerbellDatacenterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: ObjectMeta{
Name: clusterName,
},
Spec: TinkerbellDatacenterConfigSpec{},
}
}
func (c *TinkerbellDatacenterConfigGenerate) APIVersion() string {
return c.TypeMeta.APIVersion
}
func (c *TinkerbellDatacenterConfigGenerate) Kind() string {
return c.TypeMeta.Kind
}
func (c *TinkerbellDatacenterConfigGenerate) Name() string {
return c.ObjectMeta.Name
}
func GetTinkerbellDatacenterConfig(fileName string) (*TinkerbellDatacenterConfig, error) {
var clusterConfig TinkerbellDatacenterConfig
err := ParseClusterConfig(fileName, &clusterConfig)
if err != nil {
return nil, err
}
return &clusterConfig, nil
}
func validateDatacenterConfig(config *TinkerbellDatacenterConfig) error {
if config.Spec.OSImageURL != "" {
if _, err := url.ParseRequestURI(config.Spec.OSImageURL); err != nil {
return fmt.Errorf("parsing osImageOverride: %v", err)
}
}
if config.Spec.HookImagesURLPath != "" {
if _, err := url.ParseRequestURI(config.Spec.HookImagesURLPath); err != nil {
return fmt.Errorf("parsing hookOverride: %v", err)
}
}
if err := validateObjectMeta(config.ObjectMeta); err != nil {
return fmt.Errorf("TinkerbellDatacenterConfig: %v", err)
}
if config.Spec.TinkerbellIP == "" {
return errors.New("TinkerbellDatacenterConfig: missing spec.tinkerbellIP field")
}
if err := networkutils.ValidateIP(config.Spec.TinkerbellIP); err != nil {
return fmt.Errorf("TinkerbellDatacenterConfig: invalid tinkerbell ip: %v", err)
}
return nil
}
func validateObjectMeta(meta metav1.ObjectMeta) error {
if meta.Name == "" {
return errors.New("missing name")
}
return nil
}
| 85 |
eks-anywhere | aws | Go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Important: Run "make generate" to regenerate code after modifying this file
// TinkerbellDatacenterConfigSpec defines the desired state of TinkerbellDatacenterConfig.
type TinkerbellDatacenterConfigSpec struct {
// TinkerbellIP is used to configure a VIP for hosting the Tinkerbell services.
TinkerbellIP string `json:"tinkerbellIP"`
// OSImageURL can be used to override the default OS image path to pull from a local server.
OSImageURL string `json:"osImageURL,omitempty"`
// HookImagesURLPath can be used to override the default Hook images path to pull from a local server.
HookImagesURLPath string `json:"hookImagesURLPath,omitempty"`
// SkipLoadBalancerDeployment when set to "true" can be used to skip deploying a load balancer to expose Tinkerbell stack.
// Users will need to deploy and configure a load balancer manually after the cluster is created.
SkipLoadBalancerDeployment bool `json:"skipLoadBalancerDeployment,omitempty"`
}
// TinkerbellDatacenterConfigStatus defines the observed state of TinkerbellDatacenterConfig
//
// Important: Run "make generate" to regenerate code after modifying this file.
type TinkerbellDatacenterConfigStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// TinkerbellDatacenterConfig is the Schema for the TinkerbellDatacenterConfigs API.
type TinkerbellDatacenterConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec TinkerbellDatacenterConfigSpec `json:"spec,omitempty"`
Status TinkerbellDatacenterConfigStatus `json:"status,omitempty"`
}
func (t *TinkerbellDatacenterConfig) Kind() string {
return t.TypeMeta.Kind
}
func (v *TinkerbellDatacenterConfig) ExpectedKind() string {
return TinkerbellDatacenterKind
}
func (t *TinkerbellDatacenterConfig) PauseReconcile() {
if t.Annotations == nil {
t.Annotations = map[string]string{}
}
t.Annotations[pausedAnnotation] = "true"
}
func (t *TinkerbellDatacenterConfig) IsReconcilePaused() bool {
if s, ok := t.Annotations[pausedAnnotation]; ok {
return s == "true"
}
return false
}
func (t *TinkerbellDatacenterConfig) ClearPauseAnnotation() {
if t.Annotations != nil {
delete(t.Annotations, pausedAnnotation)
}
}
// Validate validates the Tinkerbell datacenter config.
func (t *TinkerbellDatacenterConfig) Validate() error {
return validateDatacenterConfig(t)
}
func (t *TinkerbellDatacenterConfig) ConvertConfigToConfigGenerateStruct() *TinkerbellDatacenterConfigGenerate {
namespace := defaultEksaNamespace
if t.Namespace != "" {
namespace = t.Namespace
}
config := &TinkerbellDatacenterConfigGenerate{
TypeMeta: t.TypeMeta,
ObjectMeta: ObjectMeta{
Name: t.Name,
Annotations: t.Annotations,
Namespace: namespace,
},
Spec: t.Spec,
}
return config
}
func (t *TinkerbellDatacenterConfig) Marshallable() Marshallable {
return t.ConvertConfigToConfigGenerateStruct()
}
// +kubebuilder:object:generate=false
// Same as TinkerbellDatacenterConfig except stripped down for generation of yaml file during generate clusterconfig.
type TinkerbellDatacenterConfigGenerate struct {
metav1.TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
Spec TinkerbellDatacenterConfigSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// TinkerbellDatacenterConfigList contains a list of TinkerbellDatacenterConfig.
type TinkerbellDatacenterConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []TinkerbellDatacenterConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&TinkerbellDatacenterConfig{}, &TinkerbellDatacenterConfigList{})
}
| 117 |
eks-anywhere | aws | Go | package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestTinkerbellDatacenterConfigValidateFail(t *testing.T) {
tests := []struct {
name string
tinkDC *v1alpha1.TinkerbellDatacenterConfig
wantErr string
}{
{
name: "Empty Tink IP",
tinkDC: newTinkerbellDatacenterConfig(func(dc *v1alpha1.TinkerbellDatacenterConfig) {
dc.Spec.TinkerbellIP = ""
}),
wantErr: "missing spec.tinkerbellIP field",
},
{
name: "Invalid Tink IP",
tinkDC: newTinkerbellDatacenterConfig(func(dc *v1alpha1.TinkerbellDatacenterConfig) {
dc.Spec.TinkerbellIP = "10"
}),
wantErr: "invalid tinkerbell ip: ",
},
{
name: "Invalid OS Image URL",
tinkDC: newTinkerbellDatacenterConfig(func(dc *v1alpha1.TinkerbellDatacenterConfig) {
dc.Spec.OSImageURL = "test"
}),
wantErr: "parsing osImageOverride: parse \"test\": invalid URI for request",
},
{
name: "Invalid hook Image URL",
tinkDC: newTinkerbellDatacenterConfig(func(dc *v1alpha1.TinkerbellDatacenterConfig) {
dc.Spec.HookImagesURLPath = "test"
}),
wantErr: "parsing hookOverride: parse \"test\": invalid URI for request",
},
{
name: "invalid object data",
tinkDC: newTinkerbellDatacenterConfig(func(dc *v1alpha1.TinkerbellDatacenterConfig) {
dc.ObjectMeta.Name = ""
}),
wantErr: "TinkerbellDatacenterConfig: missing name",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.tinkDC.Validate()).To(MatchError(ContainSubstring(tt.wantErr)))
})
}
}
func TestTinkerbellDatacenterConfigValidateSuccess(t *testing.T) {
tinkDC := createTinkerbellDatacenterConfig()
g := NewWithT(t)
g.Expect(tinkDC.Validate()).To(Succeed())
}
func newTinkerbellDatacenterConfig(opts ...func(*v1alpha1.TinkerbellDatacenterConfig)) *v1alpha1.TinkerbellDatacenterConfig {
c := createTinkerbellDatacenterConfig()
for _, o := range opts {
o(c)
}
return c
}
type tinkerbellDatacenterOpt func(dc *v1alpha1.TinkerbellDatacenterConfig)
func createTinkerbellDatacenterConfig(opts ...tinkerbellDatacenterOpt) *v1alpha1.TinkerbellDatacenterConfig {
dc := &v1alpha1.TinkerbellDatacenterConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Annotations: make(map[string]string, 1),
Name: "tinkerbelldatacenterconfig",
},
Spec: v1alpha1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "1.1.1.1",
},
Status: v1alpha1.TinkerbellDatacenterConfigStatus{},
}
for _, opt := range opts {
opt(dc)
}
return dc
}
| 99 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.