repo_name (string, 1–52 chars) | repo_creator (string, 6 classes) | programming_language (string, 4 classes) | code (string, 0–9.68M chars) | num_lines (int64, 1–234k)
---|---|---|---|---|
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"time"
"github.com/aws/eks-anywhere/release/pkg/clients"
)
// ReleaseConfig contains metadata fields for a release.
type ReleaseConfig struct {
ReleaseVersion string
DevReleaseUriVersion string
BundleNumber int
CliMinVersion string
CliMaxVersion string
CliRepoUrl string
CliRepoSource string
CliRepoHead string
CliRepoBranchName string
BuildRepoUrl string
BuildRepoSource string
BuildRepoHead string
BuildRepoBranchName string
ArtifactDir string
SourceBucket string
ReleaseBucket string
SourceContainerRegistry string
ReleaseContainerRegistry string
CDN string
ReleaseNumber int
ReleaseDate string
ReleaseTime time.Time
DevRelease bool
DryRun bool
Weekly bool
ReleaseEnvironment string
SourceClients *clients.SourceClients
ReleaseClients *clients.ReleaseClients
BundleArtifactsTable map[string][]Artifact
EksAArtifactsTable map[string][]Artifact
}
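// ImageTagOverride associates an image repository with the release image URI used to override its references in a manifest.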
type ImageTagOverride struct {
Repository string
ReleaseUri string
}
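// ArchiveArtifact describes an archive (tarball or OS image) artifact, including its source location in S3 and its release destination.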
type ArchiveArtifact struct {
SourceS3Key string
SourceS3Prefix string
ArtifactPath string
ReleaseName string
ReleaseS3Path string
ReleaseCdnURI string
OS string
OSName string
Arch []string
GitTag string
ProjectPath string
SourcedFromBranch string
ImageFormat string
}
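// ImageArtifact describes a container image artifact and its source and release image URIs.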
type ImageArtifact struct {
AssetName string
SourceImageURI string
ReleaseImageURI string
OS string
Arch []string
GitTag string
ProjectPath string
SourcedFromBranch string
}
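// ManifestArtifact describes a manifest artifact, including its source and release locations and any image tag overrides to apply.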
type ManifestArtifact struct {
SourceS3Prefix string // S3 URI prefix (base path) from which to download the artifact
SourceS3Key string
ArtifactPath string
ReleaseName string
ReleaseS3Path string
ReleaseCdnURI string
ImageTagOverrides []ImageTagOverride
GitTag string
ProjectPath string
SourcedFromBranch string
Component string
}
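// Artifact holds one of the supported artifact types: archive, image, or manifest.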
type Artifact struct {
Archive *ArchiveArtifact
Image *ImageArtifact
Manifest *ManifestArtifact
}
| 108 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package artifacts
import (
"fmt"
"net/url"
"strings"
"github.com/aws/eks-anywhere/release/pkg/constants"
)
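// IsObjectNotFoundError reports whether err is the error returned when a requested object is not found.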
func IsObjectNotFoundError(err error) bool {
return err.Error() == "Requested object not found"
}
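// IsImageNotFoundError reports whether err is the error returned when a requested image is not found.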
func IsImageNotFoundError(err error) bool {
return err.Error() == "Requested image not found"
}
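// GetManifestFilepaths returns the manifest file path for the given release kind (Bundles or Release), accounting for dev, weekly, and production releases.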
func GetManifestFilepaths(devRelease, weekly bool, bundleNumber int, kind, branch, releaseDate string) string {
var manifestFilepath string
switch kind {
case constants.BundlesKind:
if devRelease {
if branch != "main" {
manifestFilepath = fmt.Sprintf("%s/bundle-release.yaml", branch)
} else {
manifestFilepath = "bundle-release.yaml"
if weekly {
manifestFilepath = fmt.Sprintf("weekly-releases/%s/bundle-release.yaml", releaseDate)
}
}
} else {
manifestFilepath = fmt.Sprintf("releases/bundles/%d/manifest.yaml", bundleNumber)
}
case constants.ReleaseKind:
if devRelease {
if branch != "main" {
manifestFilepath = fmt.Sprintf("%s/eks-a-release.yaml", branch)
} else {
manifestFilepath = "eks-a-release.yaml"
if weekly {
manifestFilepath = fmt.Sprintf("weekly-releases/%s/eks-a-release.yaml", releaseDate)
}
}
} else {
manifestFilepath = "releases/eks-a/manifest.yaml"
}
}
return manifestFilepath
}
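// GetFakeSHA returns a placeholder (fake) checksum string matching the hex length of a SHA-256 or SHA-512 digest; it errors on any other hash type.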
func GetFakeSHA(hashType int) (string, error) {
if (hashType != 256) && (hashType != 512) {
return "", fmt.Errorf("unsupported hash algorithm: %d", hashType)
}
var shaSum string
if hashType == 256 {
shaSum = strings.Repeat(constants.HexAlphabet, 4)
} else {
shaSum = strings.Repeat(constants.HexAlphabet, 8)
}
return shaSum, nil
}
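// GetLatestUploadDestination returns the upload destination folder for artifacts built from the given branch: "latest" for main, the branch name otherwise.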
func GetLatestUploadDestination(sourcedFromBranch string) string {
if sourcedFromBranch == "main" {
return "latest"
} else {
return sourcedFromBranch
}
}
// GetURI returns a full URL for the given path.
func GetURI(cdn, path string) (string, error) {
uri, err := url.Parse(cdn)
if err != nil {
return "", err
}
uri.Path = path
return uri.String(), nil
}
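// SplitImageUri splits an image URI into the repository path relative to the given container registry and the image tag.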
func SplitImageUri(imageUri, imageContainerRegistry string) (string, string) {
imageUriSplit := strings.Split(imageUri, ":")
imageRepository := strings.ReplaceAll(imageUriSplit[0], imageContainerRegistry+"/", "")
imageTag := imageUriSplit[1]
return imageRepository, imageTag
}
| 105 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"sort"
"github.com/pkg/errors"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/images"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
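// SortArtifactsMap returns the keys of an artifacts map in sorted order, for deterministic iteration.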
func SortArtifactsMap(m map[string][]releasetypes.Artifact) []string {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
func getKubeRbacProxyImageAttributes(r *releasetypes.ReleaseConfig) (string, string, map[string]string, error) {
gitTag, err := filereader.ReadGitTag(constants.KubeRbacProxyProjectPath, r.BuildRepoSource, r.BuildRepoBranchName)
if err != nil {
return "", "", nil, errors.Cause(err)
}
name := "kube-rbac-proxy"
repoName := fmt.Sprintf("brancz/%s", name)
tagOptions := map[string]string{
"gitTag": gitTag,
"projectPath": constants.KubeRbacProxyProjectPath,
}
return name, repoName, tagOptions, nil
}
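// GetKubeRbacProxyImageTagOverride returns the image tag override for the kube-rbac-proxy image, resolving its release image URI from the build repo's Git tag.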
func GetKubeRbacProxyImageTagOverride(r *releasetypes.ReleaseConfig) (releasetypes.ImageTagOverride, error) {
name, repoName, tagOptions, err := getKubeRbacProxyImageAttributes(r)
if err != nil {
return releasetypes.ImageTagOverride{}, errors.Cause(err)
}
releaseImageUri, err := images.GetReleaseImageURI(r, name, repoName, tagOptions, assettypes.ImageTagConfiguration{}, false, false)
if err != nil {
return releasetypes.ImageTagOverride{}, errors.Cause(err)
}
imageTagOverride := releasetypes.ImageTagOverride{
Repository: repoName,
ReleaseUri: releaseImageUri,
}
return imageTagOverride, nil
}
| 72 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"os/exec"
"strings"
"github.com/pkg/errors"
)
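// ExecCommand runs cmd and returns its combined stdout and stderr output, trimmed of surrounding whitespace.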
func ExecCommand(cmd *exec.Cmd) (string, error) {
commandOutput, err := cmd.CombinedOutput()
commandOutputStr := strings.TrimSpace(string(commandOutput))
if err != nil {
return commandOutputStr, errors.Cause(err)
}
return commandOutputStr, nil
}
| 32 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package release
import (
"fmt"
"strings"
"sigs.k8s.io/yaml"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/aws/s3"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
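// EksAReleases is the list of EKS-A releases tracked in the releases manifest.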
type EksAReleases []anywherev1alpha1.EksARelease
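// GetPreviousReleaseIfExists reads the existing EKS-A releases manifest over the release CDN if its key exists in the release bucket; it returns an empty Release for dry runs or when no manifest has been published yet.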
func GetPreviousReleaseIfExists(r *releasetypes.ReleaseConfig) (*anywherev1alpha1.Release, error) {
emptyRelease := &anywherev1alpha1.Release{
Spec: anywherev1alpha1.ReleaseSpec{
Releases: []anywherev1alpha1.EksARelease{},
},
}
if r.DryRun {
return emptyRelease, nil
}
release := &anywherev1alpha1.Release{}
eksAReleaseManifestKey := artifactutils.GetManifestFilepaths(r.DevRelease, r.Weekly, r.BundleNumber, constants.ReleaseKind, r.BuildRepoBranchName, r.ReleaseDate)
eksAReleaseManifestUrl := fmt.Sprintf("%s/%s", r.CDN, eksAReleaseManifestKey)
if s3.KeyExists(r.ReleaseBucket, eksAReleaseManifestKey) {
contents, err := filereader.ReadHttpFile(eksAReleaseManifestUrl)
if err != nil {
return nil, fmt.Errorf("Error reading releases manifest from S3: %v", err)
}
if err = yaml.Unmarshal(contents, release); err != nil {
return nil, fmt.Errorf("Error unmarshaling releases manifest from [%s]: %v", eksAReleaseManifestUrl, err)
}
return release, nil
}
return emptyRelease, nil
}
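// AppendOrUpdateRelease updates in place the release whose core semver (version without build metadata) matches r, or appends r as a new release.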
func (releases EksAReleases) AppendOrUpdateRelease(r anywherev1alpha1.EksARelease) EksAReleases {
currentReleaseSemver := strings.Split(r.Version, "+")[0]
for i, release := range releases {
existingReleaseSemver := strings.Split(release.Version, "+")[0]
if currentReleaseSemver == existingReleaseSemver {
releases[i] = r
fmt.Println("Updating existing release in releases manifest")
return releases
}
}
releases = append(releases, r)
fmt.Println("Adding new release to releases manifest")
return releases
}
| 77 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package slices
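// SliceContains reports whether str is present in s.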
func SliceContains(s []string, str string) bool {
for _, elem := range s {
if elem == str {
return true
}
}
return false
}
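// SliceEqual reports whether a and b contain the same elements with the same multiplicities, regardless of order.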
func SliceEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
m := make(map[string]int, len(a))
for _, v := range a {
m[v]++
}
for _, v := range b {
if _, ok := m[v]; !ok {
return false
}
m[v] -= 1
if m[v] == 0 {
delete(m, v)
}
}
return len(m) == 0
}
| 46 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/git"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
const FakeComponentChecksum = "abcdef1"
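// ProjectVersioner computes a project's patch version for building component version strings.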
type ProjectVersioner interface {
patchVersion() (string, error)
}
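// BuildComponentVersion returns the component's full version string in the form "<patch version>+<checksum>".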
func BuildComponentVersion(versioner ProjectVersioner, componentCheckSum string) (string, error) {
patchVersion, err := versioner.patchVersion()
if err != nil {
return "", err
}
return fmt.Sprintf("%s+%s", patchVersion, componentCheckSum), nil
}
type Versioner struct {
repoSource string
pathToProject string
}
func NewVersioner(pathToProject string) *Versioner {
return &Versioner{pathToProject: pathToProject}
}
func (v *Versioner) patchVersion() (string, error) {
projectSource := filepath.Join(v.repoSource, v.pathToProject)
out, err := git.DescribeTag(projectSource)
if err != nil {
return "", errors.Wrapf(err, "failed executing git describe to get version in [%s]", projectSource)
}
gitVersion := strings.Split(out, "-")
gitTag := gitVersion[0]
return gitTag, nil
}
type VersionerWithGITTAG struct {
Versioner
folderWithGITTAG string
sourcedFromBranch string
releaseConfig *releasetypes.ReleaseConfig
}
func NewVersionerWithGITTAG(repoSource, pathToProject, sourcedFromBranch string, releaseConfig *releasetypes.ReleaseConfig) *VersionerWithGITTAG {
return &VersionerWithGITTAG{
folderWithGITTAG: pathToProject,
Versioner: Versioner{repoSource: repoSource, pathToProject: pathToProject},
sourcedFromBranch: sourcedFromBranch,
releaseConfig: releaseConfig,
}
}
func NewMultiProjectVersionerWithGITTAG(repoSource, pathToRootFolder, pathToMainProject, sourcedFromBranch string, releaseConfig *releasetypes.ReleaseConfig) *VersionerWithGITTAG {
return &VersionerWithGITTAG{
folderWithGITTAG: pathToMainProject,
Versioner: Versioner{repoSource: repoSource, pathToProject: pathToRootFolder},
sourcedFromBranch: sourcedFromBranch,
releaseConfig: releaseConfig,
}
}
func (v *VersionerWithGITTAG) patchVersion() (string, error) {
return filereader.ReadGitTag(v.folderWithGITTAG, v.releaseConfig.BuildRepoSource, v.sourcedFromBranch)
}
type cliVersioner struct {
Versioner
cliVersion string
}
func NewCliVersioner(cliVersion, pathToProject string) *cliVersioner {
return &cliVersioner{
cliVersion: cliVersion,
Versioner: Versioner{pathToProject: pathToProject},
}
}
func (v *cliVersioner) patchVersion() (string, error) {
return v.cliVersion, nil
}
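// GenerateComponentHash returns the first seven hex characters of the SHA-256 digest of the concatenated input hashes.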
func GenerateComponentHash(hashes []string, dryRun bool) string {
b := make([][]byte, len(hashes))
for i, str := range hashes {
b[i] = []byte(str)
}
joinByteArrays := bytes.Join(b, []byte(""))
hash := sha256.Sum256(joinByteArrays)
hashStr := hex.EncodeToString(hash[:])[:7]
return hashStr
}
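// GenerateManifestHash returns the SHA-256 digest of the manifest artifact's contents, or a fake checksum for dry runs.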
func GenerateManifestHash(r *releasetypes.ReleaseConfig, manifestArtifact *releasetypes.ManifestArtifact) (string, error) {
if r.DryRun {
return FakeComponentChecksum, nil
}
manifestContents, err := os.ReadFile(filepath.Join(manifestArtifact.ArtifactPath, manifestArtifact.ReleaseName))
if err != nil {
return "", errors.Wrapf(err, "failed reading manifest contents from [%s]", manifestArtifact.ArtifactPath)
}
hash := sha256.Sum256(manifestContents)
hashStr := hex.EncodeToString(hash[:])
return hashStr, nil
}
| 141 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/test/framework"
)
const (
adotTargetNamespace = "observability"
adotPackageName = "adot"
adotPackagePrefix = "generated"
)
func runCuratedPackagesAdotInstall(test *framework.ClusterE2ETest) {
test.SetPackageBundleActive()
test.CreateNamespace(adotTargetNamespace)
test.InstallCuratedPackage(adotPackageName, adotPackagePrefix+"-"+adotPackageName,
kubeconfig.FromClusterName(test.ClusterName),
"--set mode=deployment")
test.VerifyAdotPackageInstalled(adotPackagePrefix+"-"+adotPackageName, adotTargetNamespace)
}
func runCuratedPackagesAdotInstallWithUpdate(test *framework.ClusterE2ETest) {
test.SetPackageBundleActive()
test.CreateNamespace(adotTargetNamespace)
test.InstallCuratedPackage(adotPackageName, adotPackagePrefix+"-"+adotPackageName,
kubeconfig.FromClusterName(test.ClusterName),
"--set mode=deployment")
test.VerifyAdotPackageInstalled(adotPackagePrefix+"-"+adotPackageName, adotTargetNamespace)
test.VerifyAdotPackageDeploymentUpdated(adotPackagePrefix+"-"+adotPackageName, adotTargetNamespace)
test.VerifyAdotPackageDaemonSetUpdated(adotPackagePrefix+"-"+adotPackageName, adotTargetNamespace)
}
func runCuratedPackagesAdotInstallSimpleFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runCuratedPackagesAdotInstall)
}
func runCuratedPackagesAdotInstallUpdateFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runCuratedPackagesAdotInstallWithUpdate)
}
func runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOnHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneNoTaints, framework.ValidateControlPlaneLabels)
runCuratedPackagesAdotInstall(test)
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
| 56 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"context"
"fmt"
"io"
"net/http"
"os"
"regexp"
"github.com/aws/eks-anywhere/test/framework"
)
const (
bottlerocketOSFileName = "bottlerocket.img.gz"
airgapUsername = "airgap"
bundleReleasePathFromArtifacts = "./eks-anywhere-downloads/bundle-release.yaml"
)
// runAirgapConfigFlow runs an airgapped deployment flow but allows the bootstrap cluster to access local peers.
func runAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs string) {
test.GenerateClusterConfig()
test.DownloadArtifacts()
test.ExtractDownloadedArtifacts()
test.DownloadImages()
test.AirgapDockerContainers(localCIDRs)
test.CreateAirgappedUser(localCIDRs)
test.AssertAirgappedNetwork()
test.ImportImages()
test.CreateCluster(
framework.WithSudo(airgapUsername),
framework.WithBundlesOverride(bundleReleasePathFromArtifacts), // generated by ExtractDownloadedArtifacts
)
test.DeleteCluster()
}
func runTinkerbellAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs string) {
test.DownloadArtifacts()
test.ExtractDownloadedArtifacts()
brContent, err := os.ReadFile(bundleReleasePathFromArtifacts)
if err != nil {
test.T.Fatalf("Cannot read bundleRelease file: %v", err)
}
server := downloadAndServeTinkerbellArtifacts(test.T, brContent)
defer server.Shutdown(context.Background())
test.GenerateClusterConfig()
test.DownloadImages(
framework.WithBundlesOverride(bundleReleasePathFromArtifacts),
)
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.AirgapDockerContainers(localCIDRs)
test.CreateAirgappedUser(localCIDRs)
test.AssertAirgappedNetwork()
test.ImportImages()
test.CreateCluster(
// airgap user should be airgapped through iptables
framework.WithSudo(airgapUsername),
framework.WithBundlesOverride(bundleReleasePathFromArtifacts), // generated by ExtractDownloadedArtifacts
framework.WithForce(),
framework.WithControlPlaneWaitTimeout("20m"))
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
func runTinkerbellAirgapConfigProxyFlow(test *framework.ClusterE2ETest, localCIDRs string) {
test.DownloadArtifacts()
test.ExtractDownloadedArtifacts()
// For testing the proxy feature in an airgapped env,
// we have to download hook images on the admin host.
brContent, err := os.ReadFile(bundleReleasePathFromArtifacts)
if err != nil {
test.T.Fatalf("Cannot read bundleRelease file: %v", err)
}
server := downloadAndServeTinkerbellArtifacts(test.T, brContent)
defer server.Shutdown(context.Background())
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.AirgapDockerContainers(localCIDRs)
test.CreateAirgappedUser(localCIDRs)
test.AssertAirgappedNetwork()
test.CreateCluster(
// airgap user should be airgapped through iptables
framework.WithSudo(airgapUsername),
framework.WithBundlesOverride(bundleReleasePathFromArtifacts), // generated by ExtractDownloadedArtifacts
framework.WithForce(),
framework.WithControlPlaneWaitTimeout("30m"))
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
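// downloadFile fetches url and writes the response body to the output file, failing on any non-200 status.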
func downloadFile(url string, output string) error {
out, err := os.Create(output)
if err != nil {
return err
}
defer out.Close()
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad status: %s", resp.Status)
}
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}
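// downloadAndServeTinkerbellArtifacts downloads the hook initramfs, vmlinuz, and Bottlerocket OS image referenced by the bundle release into a temp directory and serves them over HTTP on :8080, returning the server so callers can shut it down.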
func downloadAndServeTinkerbellArtifacts(t framework.T, bundleRelease []byte) *http.Server {
initramfsUrl := regexp.MustCompile(`https://.*/hook/.*initramfs-x86_64`).Find(bundleRelease)
if initramfsUrl == nil {
t.Fatalf("Cannot find initramfsUrl from release bundle")
}
vmlinuzUrl := regexp.MustCompile(`https://.*/hook/.*vmlinuz-x86_64`).Find(bundleRelease)
if vmlinuzUrl == nil {
t.Fatalf("Cannot find vmlinuzUrl from release bundle")
}
brOsUrl := regexp.MustCompile(`https://.*/raw/1-24/.*bottlerocket.*amd64.img.gz`).Find(bundleRelease)
if brOsUrl == nil {
t.Fatalf("Cannot find bottlerocketOS url from release bundle")
}
dir, err := os.MkdirTemp("", "tinkerbell_artifacts_")
if err != nil {
t.Fatalf("Cannot create temporary directory to serve Tinkerbell artifacts %v", err)
}
t.Logf("Created directory for holding local tinkerbell artifacts: %s", dir)
t.Logf("Download Initramfs from: %s", initramfsUrl)
err = downloadFile(string(initramfsUrl), dir+"/initramfs-x86_64")
if err != nil {
t.Fatal(err)
}
t.Logf("Download vmlinuz from %s", vmlinuzUrl)
err = downloadFile(string(vmlinuzUrl), dir+"/vmlinuz-x86_64")
if err != nil {
t.Fatal(err)
}
t.Logf("Download bottlerocket OS from %s", brOsUrl)
err = downloadFile(string(brOsUrl), dir+"/"+bottlerocketOSFileName)
if err != nil {
t.Fatal(err)
}
t.Log("Downloaded Bottlerocket OS")
server := &http.Server{Addr: ":8080", Handler: http.FileServer(http.Dir(dir))}
go func() {
t.Log("Start local file server at :8080")
server.ListenAndServe()
t.Log("Local file server is shutdown")
err = os.RemoveAll(dir)
if err != nil {
t.Logf("Temporary tinkerbell artifacts cannot be cleaned: %v", err)
} else {
t.Log("Temporary tinkerbell artifacts have been cleaned")
}
}()
return server
}
func runDockerAirgapConfigFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.DownloadArtifacts()
test.ExtractDownloadedArtifacts()
test.DownloadImages()
test.ChangeInstanceSecurityGroup(os.Getenv(framework.RegistryMirrorAirgappedSecurityGroup))
test.ImportImages()
test.CreateCluster(framework.WithBundlesOverride(bundleReleasePathFromArtifacts))
test.DeleteCluster(framework.WithBundlesOverride(bundleReleasePathFromArtifacts))
test.ChangeInstanceSecurityGroup(os.Getenv(framework.RegistryMirrorDefaultSecurityGroup))
}
| 196 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"context"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
func runAutoImportFlow(test *framework.ClusterE2ETest, provider *framework.VSphere) {
test.GenerateClusterConfig()
test.CreateCluster()
templates := getMachineConfigs(test)
test.DeleteCluster()
deleteTemplates(test, provider, templates)
}
func getMachineConfigs(test *framework.ClusterE2ETest) map[string]v1alpha1.VSphereMachineConfig {
test.T.Log("Getting vsphere machine configs to extract template and resource pool")
machineConfigs := test.GetEksaVSphereMachineConfigs()
uniqueMachineConfigs := make(map[string]v1alpha1.VSphereMachineConfig, len(machineConfigs))
for _, m := range machineConfigs {
uniqueMachineConfigs[m.Spec.Template+m.Spec.ResourcePool] = m
}
return uniqueMachineConfigs
}
func deleteTemplates(test *framework.ClusterE2ETest, provider *framework.VSphere, machineConfigs map[string]v1alpha1.VSphereMachineConfig) {
ctx := context.Background()
for _, machineConfig := range machineConfigs {
test.T.Logf("Deleting vSphere template: %s", machineConfig.Spec.Template)
err := provider.GovcClient.DeleteTemplate(ctx, machineConfig.Spec.ResourcePool, machineConfig.Spec.Template)
if err != nil {
test.T.Errorf("Failed deleting template [%s]: %v", machineConfig.Spec.Template, err)
}
}
}
| 42 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
func runAutoscalerWithMetricsServerSimpleFlow(test *framework.ClusterE2ETest) {
test.WithCluster(func(e *framework.ClusterE2ETest) {
autoscalerName := "cluster-autoscaler"
metricServerName := "metrics-server"
targetNamespace := "eksa-packages"
test.InstallAutoScalerWithMetricServer(targetNamespace)
test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withMgmtCluster(test))
})
}
| 19 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
func runAWSIamAuthFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateAWSIamAuth()
test.StopIfFailed()
test.DeleteCluster()
}
func runUpgradeFlowWithAWSIamAuth(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateAWSIamAuth()
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.ValidateAWSIamAuth()
test.StopIfFailed()
test.DeleteCluster()
}
func runTinkerbellAWSIamAuthFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateAWSIamAuth()
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
| 40 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"fmt"
"path/filepath"
"time"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/test/framework"
)
const (
cmPackageName = "cert-manager"
)
func runCertManagerRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2ETest) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(e *framework.WorkloadCluster) {
e.GenerateClusterConfig()
e.CreateCluster()
e.VerifyPackageControllerNotInstalled()
test.ManagementCluster.SetPackageBundleActive()
packageName := "cert-manager"
packagePrefix := "test"
packageFile := e.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace)
test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName))
e.VerifyCertManagerPackageInstalled(packagePrefix, EksaPackagesNamespace, cmPackageName, withMgmtClusterSetup(test.ManagementCluster))
e.DeleteCluster()
})
time.Sleep(5 * time.Minute)
test.DeleteManagementCluster()
}
func withMgmtClusterSetup(cluster *framework.ClusterE2ETest) *types.Cluster {
return &types.Cluster{
Name: cluster.ClusterName,
KubeconfigFile: filepath.Join(cluster.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", cluster.ClusterName)),
ExistingManagement: true,
}
}
| 45 |
eks-anywhere | aws | Go | //go:build e2e && (cloudstack || all_providers)
// +build e2e
// +build cloudstack all_providers
package e2e
import (
"testing"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/test/framework"
)
// AWS IAM Auth
func TestCloudStackKubernetes123AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runAWSIamAuthFlow(test)
}
func TestCloudStackKubernetes124AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runAWSIamAuthFlow(test)
}
func TestCloudStackKubernetes123to124AWSIamAuthUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runUpgradeFlowWithAWSIamAuth(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
// Curated packages test
func TestCloudStackKubernetes123RedhatCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestCloudStackKubernetes123RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestCloudStackKubernetes123RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestCloudStackKubernetes124RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestCloudStackKubernetes124RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
"my-packages-test", EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
// Download artifacts
func TestCloudStackDownloadArtifacts(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runDownloadArtifactsFlow(test)
}
// Flux
func TestCloudStackKubernetes123GithubFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithFluxGithub(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestCloudStackKubernetes124GithubFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithFluxGithub(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestCloudStackKubernetes123GitFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestCloudStackKubernetes124GitFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
// TODO: Add 1.23 to 1.24 flux upgrade tests
func TestCloudStackKubernetes123To124GitFluxUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(t,
provider,
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
func TestCloudStackKubernetes123InstallGitFluxDuringUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube123,
framework.WithFluxGit(),
framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
)
}
func TestCloudStackKubernetes124InstallGitFluxDuringUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewClusterE2ETest(t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube124,
framework.WithFluxGit(),
framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
)
}
// Labels
func TestCloudStackKubernetes123LabelsAndNodeNameRedhat(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t,
framework.WithCloudStackRedhat123(),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder),
api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName,
api.WithCount(1),
api.WithLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder),
),
),
)
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneFailureDomainLabels, framework.ValidateControlPlaneNodeNameMatchCAPIMachineName)
test.ValidateWorkerNodes(framework.ValidateWorkerNodeFailureDomainLabels, framework.ValidateWorkerNodeNameMatchCAPIMachineName)
test.DeleteCluster()
}
func TestCloudStackKubernetes124LabelsAndNodeNameRedhat(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t,
framework.WithCloudStackRedhat124(),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder),
api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName,
api.WithCount(1),
api.WithLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder),
),
),
)
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneFailureDomainLabels, framework.ValidateControlPlaneNodeNameMatchCAPIMachineName)
test.ValidateWorkerNodes(framework.ValidateWorkerNodeFailureDomainLabels, framework.ValidateWorkerNodeNameMatchCAPIMachineName)
test.DeleteCluster()
}
func TestCloudStackKubernetes123RedhatLabelsUpgradeFlow(t *testing.T) {
provider := redhat123ProviderWithLabels(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runLabelsUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)),
api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)),
api.WithWorkerNodeGroup(worker2),
api.WithControlPlaneLabel(cpKey1, cpVal1),
),
)
}
func TestCloudStackKubernetes124RedhatLabelsUpgradeFlow(t *testing.T) {
provider := redhat124ProviderWithLabels(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runLabelsUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)),
api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)),
api.WithWorkerNodeGroup(worker2),
api.WithControlPlaneLabel(cpKey1, cpVal1),
),
)
}
func redhat123ProviderWithLabels(t *testing.T) *framework.CloudStack {
return framework.NewCloudStack(t,
framework.WithCloudStackWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(worker0, api.WithCount(2),
api.WithLabel(key1, val2)),
),
framework.WithCloudStackWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithCloudStackWorkerNodeGroup(
worker2,
framework.WithWorkerNodeGroup(worker2, api.WithCount(1),
api.WithLabel(key2, val2)),
),
framework.WithCloudStackRedhat123(),
)
}
func redhat124ProviderWithLabels(t *testing.T) *framework.CloudStack {
return framework.NewCloudStack(t,
framework.WithCloudStackWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(worker0, api.WithCount(2),
api.WithLabel(key1, val2)),
),
framework.WithCloudStackWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithCloudStackWorkerNodeGroup(
worker2,
framework.WithWorkerNodeGroup(worker2, api.WithCount(1),
api.WithLabel(key2, val2)),
),
framework.WithCloudStackRedhat124(),
)
}
// Multicluster
func TestCloudStackKubernetes123MulticlusterWorkloadCluster(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterFlow(test)
}
func TestCloudStackKubernetes124MulticlusterWorkloadCluster(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterFlow(test)
}
// WL cluster with prev release version from management cluster
func TestCloudStackKubernetes123MulticlusterWorkloadClusterPrevVersion(t *testing.T) {
prevLatestRel := prevLatestMinorRelease(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterPrevVersionCreateFlow(test, prevLatestRel)
}
func TestCloudStackUpgradeKubernetes124MulticlusterWorkloadClusterWithGithubFlux(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithFluxGithub(),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithFluxGithub(),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterFlowWithGitOps(
test,
framework.WithClusterUpgradeGit(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(3),
api.WithWorkerNodeCount(3),
),
provider.WithProviderUpgradeGit(
provider.Redhat124Template(),
),
)
}
func TestCloudStackKubernetes123WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
runTestManagementClusterUpgradeSideEffects(t, cloudstack, anywherev1.RedHat, anywherev1.Kube123)
}
func TestCloudStackKubernetes124WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
runTestManagementClusterUpgradeSideEffects(t, cloudstack, anywherev1.RedHat, anywherev1.Kube124)
}
// OIDC
func TestCloudStackKubernetes123OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestCloudStackKubernetes124OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestCloudStackKubernetes123To124OIDCUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithOIDC(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
// Proxy config
func TestCloudStackKubernetes123RedhatProxyConfig(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithProxy(framework.CloudstackProxyRequiredEnvVars),
)
runProxyConfigFlow(test)
}
func TestCloudStackKubernetes124RedhatProxyConfig(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithProxy(framework.CloudstackProxyRequiredEnvVars),
)
runProxyConfigFlow(test)
}
// Proxy config multicluster
func TestCloudStackKubernetes123RedhatProxyConfigAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
cloudstack.WithRedhat123(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithProxy(framework.CloudstackProxyRequiredEnvVars),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
),
cloudstack.WithRedhat123(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackKubernetes124RedhatProxyConfigAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
cloudstack.WithRedhat124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithProxy(framework.CloudstackProxyRequiredEnvVars),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
),
cloudstack.WithRedhat124(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
// Registry mirror
func TestCloudStackKubernetes123RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithRegistryMirrorInsecureSkipVerify(constants.CloudStackProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestCloudStackKubernetes124RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithRegistryMirrorInsecureSkipVerify(constants.CloudStackProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestCloudStackKubernetes123RedhatRegistryMirrorAndCert(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestCloudStackKubernetes124RedhatRegistryMirrorAndCert(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestCloudStackKubernetes123RedhatAuthenticatedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithAuthenticatedRegistryMirror(constants.CloudStackProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestCloudStackKubernetes124RedhatAuthenticatedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithAuthenticatedRegistryMirror(constants.CloudStackProviderName),
)
runRegistryMirrorConfigFlow(test)
}
// Simpleflow
func TestCloudStackKubernetes123SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes124SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes123ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(5)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes124ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(5)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes123MultiEndpointSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123(),
framework.WithCloudStackFillers(framework.UpdateAddCloudStackAz2())),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes124MultiEndpointSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124(),
framework.WithCloudStackFillers(framework.UpdateAddCloudStackAz2())),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes123DifferentNamespaceSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123(),
framework.WithCloudStackFillers(api.WithCloudStackConfigNamespace(clusterNamespace),
api.WithCloudStackConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes124DifferentNamespaceSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124(),
framework.WithCloudStackFillers(api.WithCloudStackConfigNamespace(clusterNamespace),
api.WithCloudStackConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)),
)
runSimpleFlow(test)
}
// Cilium Policy
func TestCloudStackKubernetes123CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes124CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
)
runSimpleFlow(test)
}
func TestCloudStackKubernetes123RedhatTo124UpgradeCiliumPolicyEnforcementMode(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
// Stacked etcd
func TestCloudStackKubernetes123StackedEtcdRedhat(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
func TestCloudStackKubernetes124StackedEtcdRedhat(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
// Taints
func TestCloudStackKubernetes123RedhatTaintsUpgradeFlow(t *testing.T) {
provider := redhat123ProviderWithTaints(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runTaintsUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker2, api.WithNoTaints()),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
func TestCloudStackKubernetes124RedhatTaintsUpgradeFlow(t *testing.T) {
provider := redhat124ProviderWithTaints(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runTaintsUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker2, api.WithNoTaints()),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
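// redhat123ProviderWithTaints returns a Redhat 1.23 CloudStack provider with three worker node groups: a two-node NoSchedule group, an untainted single-node group, and a single-node PreferNoSchedule group.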
func redhat123ProviderWithTaints(t *testing.T) *framework.CloudStack {
return framework.NewCloudStack(t,
framework.WithCloudStackWorkerNodeGroup(
worker0,
framework.NoScheduleWorkerNodeGroup(worker0, 2),
),
framework.WithCloudStackWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithCloudStackWorkerNodeGroup(
worker2,
framework.PreferNoScheduleWorkerNodeGroup(worker2, 1),
),
framework.WithCloudStackRedhat123(),
)
}
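// redhat124ProviderWithTaints returns the same three tainted worker node groups as redhat123ProviderWithTaints, backed by Redhat 1.24.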
func redhat124ProviderWithTaints(t *testing.T) *framework.CloudStack {
return framework.NewCloudStack(t,
framework.WithCloudStackWorkerNodeGroup(
worker0,
framework.NoScheduleWorkerNodeGroup(worker0, 2),
),
framework.WithCloudStackWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithCloudStackWorkerNodeGroup(
worker2,
framework.PreferNoScheduleWorkerNodeGroup(worker2, 1),
),
framework.WithCloudStackRedhat124(),
)
}
// Upgrade
func TestCloudStackKubernetes123RedhatAndRemoveWorkerNodeGroups(t *testing.T) {
provider := framework.NewCloudStack(t,
framework.WithCloudStackWorkerNodeGroup(
"worker-1",
framework.WithWorkerNodeGroup("workers-1", api.WithCount(2)),
),
framework.WithCloudStackWorkerNodeGroup(
"worker-2",
framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)),
),
framework.WithCloudStackRedhat123(),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(
api.RemoveWorkerNodeGroup("workers-2"),
api.WithWorkerNodeGroup("workers-1", api.WithCount(1)),
),
provider.WithNewCloudStackWorkerNodeGroup(
"worker-1",
framework.WithWorkerNodeGroup(
"workers-3",
api.WithCount(1),
),
),
)
}
func TestCloudStackKubernetes124RedhatAndRemoveWorkerNodeGroups(t *testing.T) {
provider := framework.NewCloudStack(t,
framework.WithCloudStackWorkerNodeGroup(
"worker-1",
framework.WithWorkerNodeGroup("workers-1", api.WithCount(2)),
),
framework.WithCloudStackWorkerNodeGroup(
"worker-2",
framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)),
),
framework.WithCloudStackRedhat124(),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(
api.RemoveWorkerNodeGroup("workers-2"),
api.WithWorkerNodeGroup("workers-1", api.WithCount(1)),
),
provider.WithNewCloudStackWorkerNodeGroup(
"worker-1",
framework.WithWorkerNodeGroup(
"workers-3",
api.WithCount(1),
),
),
)
}
// 1.23 to 1.24 upgrade tests
func TestCloudStackKubernetes123To124RedhatUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithStackedEtcdTopology()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterFiller(api.WithStackedEtcdTopology()),
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
func TestCloudStackKubernetes123To124RedhatUnstackedUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
func TestCloudStackKubernetes123RedhatTo124StackedEtcdUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Redhat124Template()),
)
}
// TODO: investigate these tests further as they pass even without the expected behavior (upgrade should fail the first time and continue from the checkpoint on the second upgrade).
func TestCloudStackKubernetes123RedhatTo124UpgradeWithCheckpoint(t *testing.T) {
var clusterOpts []framework.ClusterE2ETestOpt
var clusterOpts2 []framework.ClusterE2ETestOpt
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)), framework.ExpectFailure(true),
provider.WithProviderUpgrade(provider.Redhat123Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false"))
commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")}
clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)), framework.ExpectFailure(false),
provider.WithProviderUpgrade(provider.Redhat124Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true"))
runUpgradeFlowWithCheckpoint(
test,
v1alpha1.Kube124,
clusterOpts,
clusterOpts2,
commandOpts,
)
}
func TestCloudStackKubernetes123RedhatUpgradeFromLatestMinorReleaseAlwaysNetworkPolicy(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube123,
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
provider.WithProviderUpgrade(
provider.Redhat123Template(), // Set the template so it doesn't get autoimported
),
)
}
func TestCloudStackKubernetes124RedhatUpgradeFromLatestMinorReleaseAlwaysNetworkPolicy(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
provider.WithProviderUpgrade(
provider.Redhat124Template(), // Set the template so it doesn't get autoimported
),
)
}
func TestCloudStackKubernetes123RedhatControlPlaneNodeUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(api.WithControlPlaneCount(3)),
)
}
func TestCloudStackKubernetes124RedhatControlPlaneNodeUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithControlPlaneCount(3)),
)
}
func TestCloudStackKubernetes123RedhatWorkerNodeUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)),
)
}
func TestCloudStackKubernetes124RedhatWorkerNodeUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)),
)
}
func TestCloudStackKubernetes123UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube123,
provider.WithProviderUpgrade(),
)
}
func TestCloudStackKubernetes124UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
provider.WithProviderUpgrade(),
)
}
// More 1.23 to 1.24 upgrade tests
func TestCloudStackKubernetes123To124RedhatMultipleFieldsUpgrade(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
provider.WithProviderUpgrade(
provider.Redhat124Template(),
framework.UpdateLargerCloudStackComputeOffering(),
),
)
}
func TestCloudStackKubernetes123AddRemoveAz(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
test.GenerateClusterConfig()
test.CreateCluster()
test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{
provider.WithProviderUpgrade(
framework.UpdateAddCloudStackAz2(),
),
})
test.StopIfFailed()
test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{
provider.WithProviderUpgrade(
framework.RemoveAllCloudStackAzs(),
framework.UpdateAddCloudStackAz1(),
),
})
test.StopIfFailed()
test.DeleteCluster()
}
func TestCloudStackKubernetes124AddRemoveAz(t *testing.T) {
provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat124())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
test.GenerateClusterConfig()
test.CreateCluster()
test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{
provider.WithProviderUpgrade(
framework.UpdateAddCloudStackAz2(),
),
})
test.StopIfFailed()
test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{
provider.WithProviderUpgrade(
framework.RemoveAllCloudStackAzs(),
framework.UpdateAddCloudStackAz1(),
),
})
test.StopIfFailed()
test.DeleteCluster()
}
// This test is skipped because a registry mirror is not configured for CloudStack.
func TestCloudStackKubernetes123RedhatAirgappedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t,
framework.WithCloudStackRedhat123(),
framework.WithCloudStackFillers(
framework.RemoveAllCloudStackAzs(),
framework.UpdateAddCloudStackAz3(),
),
),
framework.WithClusterFiller(
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
// framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), there is a bug where the etcd node downloads etcd from the internet
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName),
)
runAirgapConfigFlow(test, "10.0.0.1/8")
}
func TestCloudStackKubernetes124RedhatAirgappedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t,
framework.WithCloudStackRedhat124(),
framework.WithCloudStackFillers(
framework.RemoveAllCloudStackAzs(),
framework.UpdateAddCloudStackAz3(),
),
),
framework.WithClusterFiller(
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
// framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), there is a bug where the etcd node downloads etcd from the internet
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName),
)
runAirgapConfigFlow(test, "10.0.0.1/8")
}
// Workload API
func TestCloudStackKubernetes123RedHatAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
test := framework.NewClusterE2ETest(
t, cloudstack, framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
cloudstack.WithRedhat123(),
)
test.CreateCluster()
test.LoadClusterConfigGeneratedByCLI()
// Run mgmt cluster API tests
tests := cloudstackAPIManagementClusterUpgradeTests(test, cloudstack)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
runCloudStackAPIUpgradeTest(t, test, tt)
})
}
test.StopIfFailed()
test.DeleteCluster()
}
func TestCloudStackKubernetes124RedHatAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
test := framework.NewClusterE2ETest(
t, cloudstack, framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
cloudstack.WithRedhat124(),
)
test.CreateCluster()
test.LoadClusterConfigGeneratedByCLI()
// Run mgmt cluster API tests
tests := cloudstackAPIManagementClusterUpgradeTests(test, cloudstack)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
runCloudStackAPIUpgradeTest(t, test, tt)
})
}
test.StopIfFailed()
test.DeleteCluster()
}
func TestCloudStackMulticlusterWorkloadClusterAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t, cloudstack, framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat123(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
),
cloudstack.WithRedhat123(),
),
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
),
cloudstack.WithRedhat124(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
tests := cloudStackAPIWorkloadUpgradeTests(wc, cloudstack)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
runCloudStackAPIWorkloadUpgradeTest(t, wc, tt)
})
}
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackMulticlusterWorkloadClusterNewCredentialsSecretsAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t, cloudstack, framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
),
api.CloudStackToConfigFiller(
api.WithCloudStackCredentialsRef("test-creds"),
),
cloudstack.WithRedhat123(),
))
test.WithWorkloadClusters(framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
),
api.CloudStackToConfigFiller(
api.WithCloudStackCredentialsRef("test-creds"),
),
cloudstack.WithRedhat124(),
))
test.CreateManagementCluster()
test.ManagementCluster.CreateCloudStackCredentialsSecretFromEnvVar("test-creds", framework.CloudStackCredentialsAz1())
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackKubernetesRedHat123To124UpgradeFromLatestMinorReleaseAPI(t *testing.T) {
release := latestMinorRelease(t)
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
)
managementCluster.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
managementCluster.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
),
cloudstack.WithRedhat123(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
wc := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
)
wc.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
wc.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
),
cloudstack.WithRedhat123(),
)
test.WithWorkloadClusters(wc)
runMulticlusterUpgradeFromReleaseFlowAPI(
test,
release,
cloudstack.WithRedhat124(),
)
}
// Workload GitOps API
func TestCloudStackKubernetesRedHat123to124UpgradeFromLatestMinorReleaseGitHubFluxAPI(t *testing.T) {
release := latestMinorRelease(t)
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
)
managementCluster.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
managementCluster.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
),
cloudstack.WithRedhat123(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
wc := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
)
wc.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
wc.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
),
cloudstack.WithRedhat123(),
framework.WithFluxGithubConfig(),
)
test.WithWorkloadClusters(wc)
runMulticlusterUpgradeFromReleaseFlowAPIWithFlux(
test,
release,
cloudstack.WithRedhat124(),
)
}
func TestCloudStackMulticlusterWorkloadClusterGitHubFluxAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat123(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
),
cloudstack.WithRedhat123(),
),
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
),
cloudstack.WithRedhat124(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
tests := cloudStackAPIWorkloadUpgradeTests(wc, cloudstack)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
runCloudStackAPIWorkloadUpgradeTestWithFlux(t, test, wc, tt)
})
}
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackMulticlusterWorkloadClusterNewCredentialsSecretGitHubFluxAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat124(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
),
api.CloudStackToConfigFiller(
api.WithCloudStackCredentialsRef("test-creds"),
),
cloudstack.WithRedhat123(),
))
test.WithWorkloadClusters(framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
api.WithControlPlaneCount(1),
),
api.CloudStackToConfigFiller(
api.WithCloudStackCredentialsRef("test-creds"),
),
cloudstack.WithRedhat124(),
))
test.CreateManagementCluster()
test.ManagementCluster.CreateCloudStackCredentialsSecretFromEnvVar("test-creds", framework.CloudStackCredentialsAz1())
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackWorkloadClusterAWSIamAuthAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t, cloudstack, framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithAwsIamEnvVarCheck(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
),
framework.WithAwsIamConfig(),
cloudstack.WithRedhat123(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.ValidateAWSIamAuth()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackWorkloadClusterAWSIamAuthGithubFluxAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat123(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithAwsIamEnvVarCheck(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
),
framework.WithAwsIamConfig(),
cloudstack.WithRedhat123(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.ValidateAWSIamAuth()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackWorkloadClusterOIDCAuthAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t, cloudstack, framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithOIDCEnvVarCheck(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
),
framework.WithOIDCClusterConfig(t),
cloudstack.WithRedhat123(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.ValidateOIDC()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestCloudStackWorkloadClusterOIDCAuthGithubFluxAPI(t *testing.T) {
cloudstack := framework.NewCloudStack(t)
managementCluster := framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
cloudstack.WithRedhat123(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
cloudstack,
framework.WithClusterName(test.NewWorkloadClusterName()),
framework.WithEnvVar(features.FullLifecycleAPIEnvVar, "true"),
framework.WithOIDCEnvVarCheck(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithStackedEtcdTopology(),
),
framework.WithOIDCClusterConfig(t),
cloudstack.WithRedhat123(),
),
)
test.CreateManagementCluster()
// Create workload clusters
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.ValidateOIDC()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
//go:build e2e
package e2e
import (
"fmt"
"testing"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
corev1 "k8s.io/api/core/v1"
)
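// cloudStackAPIUpgradeTestStep is a single, named cluster configuration update applied during an API upgrade test.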
type cloudStackAPIUpgradeTestStep struct {
name string
configFiller api.ClusterConfigFiller
}
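// cloudStackAPIUpgradeTest is a named upgrade scenario composed of sequential steps run against one cluster.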
type cloudStackAPIUpgradeTest struct {
name string
// steps is a list of grouped updates applied sequentially during a test.
steps []cloudStackAPIUpgradeTestStep
}
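// clusterPrefix returns value prefixed with prefix ("prefix-value"), giving worker node groups cluster-scoped names.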
func clusterPrefix(value string, prefix string) string {
return fmt.Sprintf("%s-%s", prefix, value)
}
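// cloudStackAPIUpdateTestBaseStep returns a step that resets the cluster to a known base state: one control plane node and two freshly created single-node worker node groups.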
func cloudStackAPIUpdateTestBaseStep(e *framework.ClusterE2ETest, cloudstack *framework.CloudStack) cloudStackAPIUpgradeTestStep {
clusterName := e.ClusterName
return cloudStackAPIUpgradeTestStep{
name: "setting base state",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
// Add new WorkerNodeGroups
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-0", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(1))),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-1", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-1", clusterName), api.WithCount(1))),
cloudstack.WithRedhatVersion(e.ClusterConfig.Cluster.Spec.KubernetesVersion),
),
}
}
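// cloudstackAPIManagementClusterUpgradeTests returns the API-driven upgrade scenarios exercised against a management cluster: adding and removing labels and taints, scaling a worker node group up and down, and replacing worker node groups.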
func cloudstackAPIManagementClusterUpgradeTests(e *framework.ClusterE2ETest, cloudstack *framework.CloudStack) []cloudStackAPIUpgradeTest {
clusterName := e.ClusterName
return []cloudStackAPIUpgradeTest{
{
name: "add and remove labels and taints",
steps: []cloudStackAPIUpgradeTestStep{
cloudStackAPIUpdateTestBaseStep(e, cloudstack),
{
name: "adding label and taint to worker node groups",
configFiller: api.ClusterToConfigFiller(
api.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithLabel(key1, val2)),
api.WithWorkerNodeGroup(clusterPrefix("md-1", clusterName), api.WithTaint(framework.NoExecuteTaint())),
),
},
{
name: "removing label and taint from worker node groups",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-0", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(1))),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-1", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-1", clusterName), api.WithCount(1))),
cloudstack.WithRedhatVersion(e.ClusterConfig.Cluster.Spec.KubernetesVersion),
),
},
},
},
{
name: "scale up and down worker node group",
steps: []cloudStackAPIUpgradeTestStep{
cloudStackAPIUpdateTestBaseStep(e, cloudstack),
{
name: "scaling up worker node group",
configFiller: api.ClusterToConfigFiller(
api.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(2)),
),
},
{
name: "scaling down worker node group",
configFiller: api.ClusterToConfigFiller(
api.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(1)),
),
},
},
},
{
name: "replace existing worker node groups",
steps: []cloudStackAPIUpgradeTestStep{
cloudStackAPIUpdateTestBaseStep(e, cloudstack),
{
name: "replacing existing worker node groups",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
// Add new WorkerNodeGroups
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-2", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-2", clusterName), api.WithCount(1))),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-3", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-3", clusterName), api.WithCount(1))),
cloudstack.WithRedhatVersion(e.ClusterConfig.Cluster.Spec.KubernetesVersion),
),
},
},
},
}
}
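// cloudStackAPIWorkloadUpgradeTests returns the API-driven upgrade scenarios exercised against a workload cluster; compared to the management cluster scenarios they additionally cover control plane labels, taints, and scaling, plus the Cilium policy enforcement mode.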
func cloudStackAPIWorkloadUpgradeTests(wc *framework.WorkloadCluster, cloudstack *framework.CloudStack) []cloudStackAPIUpgradeTest {
clusterName := wc.ClusterName
return []cloudStackAPIUpgradeTest{
{
name: "add and remove labels and taints",
steps: []cloudStackAPIUpgradeTestStep{
cloudStackAPIUpdateTestBaseStep(wc.ClusterE2ETest, cloudstack),
{
name: "adding label and taint to worker node groups",
configFiller: api.ClusterToConfigFiller(
api.WithControlPlaneLabel(cpKey1, cpVal1),
api.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithLabel(key1, val2)),
api.WithWorkerNodeGroup(clusterPrefix("md-1", clusterName), api.WithTaint(framework.NoExecuteTaint())),
),
},
{
name: "removing label and taint from worker node groups",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-0", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(1))),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-1", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-1", clusterName), api.WithCount(1))),
cloudstack.WithRedhatVersion(wc.ClusterConfig.Cluster.Spec.KubernetesVersion),
),
},
},
},
{
name: "scale up and down cp and worker node group ",
steps: []cloudStackAPIUpgradeTestStep{
cloudStackAPIUpdateTestBaseStep(wc.ClusterE2ETest, cloudstack),
{
name: "scaling up cp and worker node group",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(3),
api.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(2)),
),
),
},
{
name: "scaling down cp and worker node group",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeGroup(clusterPrefix("md-0", clusterName), api.WithCount(1)),
),
),
},
},
},
{
name: "replace existing worker node groups and cilium policy enforcement mode",
steps: []cloudStackAPIUpgradeTestStep{
{
name: "replacing existing worker node groups + update cilium policy enforcement mode always",
configFiller: api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(
api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
// Add new WorkerNodeGroups
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-2", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-2", clusterName), api.WithCount(1))),
cloudstack.WithNewWorkerNodeGroup(clusterPrefix("md-3", clusterName), framework.WithWorkerNodeGroup(clusterPrefix("md-3", clusterName), api.WithCount(1))),
cloudstack.WithRedhatVersion(wc.ClusterConfig.Cluster.Spec.KubernetesVersion),
),
},
},
},
}
}
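// runCloudStackAPIUpgradeTest applies each step's config filler, re-applies the cluster manifest, and validates cluster state after every step.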
func runCloudStackAPIUpgradeTest(t *testing.T, test *framework.ClusterE2ETest, ut cloudStackAPIUpgradeTest) {
for _, step := range ut.steps {
t.Logf("Running API upgrade test: %s", step.name)
test.UpdateClusterConfig(step.configFiller)
test.ApplyClusterManifest()
test.ValidateClusterStateWithT(t)
}
}
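// runCloudStackAPIWorkloadUpgradeTest runs the same apply-and-validate loop against a workload cluster.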
func runCloudStackAPIWorkloadUpgradeTest(t *testing.T, wc *framework.WorkloadCluster, ut cloudStackAPIUpgradeTest) {
for _, step := range ut.steps {
t.Logf("Running API workload upgrade test: %s", step.name)
wc.UpdateClusterConfig(step.configFiller)
wc.ApplyClusterManifest()
wc.ValidateClusterStateWithT(t)
}
}
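// runCloudStackAPIWorkloadUpgradeTestWithFlux pushes each step's config to the GitOps repository so Flux reconciles the change, then validates the workload cluster state.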
func runCloudStackAPIWorkloadUpgradeTestWithFlux(t *testing.T, test *framework.MulticlusterE2ETest, wc *framework.WorkloadCluster, ut cloudStackAPIUpgradeTest) {
for _, step := range ut.steps {
t.Logf("Running API upgrade test: %s", step.name)
test.PushWorkloadClusterToGit(wc, step.configFiller)
wc.ValidateClusterStateWithT(t)
}
}
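// Illustrative sketch (not an existing test in this suite): a new scenario can be
// composed from the helpers above by combining the base step with custom fillers.
// The function name and step below are hypothetical; they reuse only fillers that
// already appear in this file. Such a scenario would be run with
// runCloudStackAPIUpgradeTest, like the tables returned above.
func exampleCloudStackAPIScaleOnlyTest(e *framework.ClusterE2ETest, cloudstack *framework.CloudStack) cloudStackAPIUpgradeTest {
	return cloudStackAPIUpgradeTest{
		name: "scale a single worker node group",
		steps: []cloudStackAPIUpgradeTestStep{
			// Reset to the known base state before mutating it.
			cloudStackAPIUpdateTestBaseStep(e, cloudstack),
			{
				name: "scaling md-0 up to three nodes",
				configFiller: api.ClusterToConfigFiller(
					api.WithWorkerNodeGroup(clusterPrefix("md-0", e.ClusterName), api.WithCount(3)),
				),
			},
		},
	}
}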
//go:build conformance_e2e
// +build conformance_e2e
package e2e
import (
"testing"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
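// runConformanceFlow creates a cluster, runs the Kubernetes conformance test suite against it, and deletes the cluster if the tests pass.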
func runConformanceFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.RunConformanceTests()
test.StopIfFailed()
test.DeleteCluster()
}
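// runTinkerbellConformanceFlow powers off the bare-metal hardware, provisions the cluster through Tinkerbell, runs the conformance suite, and verifies the hardware is decommissioned after cluster deletion.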
func runTinkerbellConformanceFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.RunConformanceTests()
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
func TestDockerKubernetes123ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestDockerKubernetes124ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestDockerKubernetes125ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestDockerKubernetes126ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes123ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes124ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes125ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes126ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes123BottleRocketThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes124BottleRocketThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes125BottleRocketThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestVSphereKubernetes126BottleRocketThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestCloudStackKubernetes123ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewCloudStack(t, framework.WithCloudStackRedhat123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestTinkerbellKubernetes123ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes124ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes125ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes126ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes124BottleRocketThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes125BottleRocketThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestTinkerbellKubernetes126BottleRocketThreeReplicasTwoWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(2),
)
runTinkerbellConformanceFlow(test)
}
func TestSnowKubernetes123ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestNutanixKubernetes123ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestNutanixKubernetes124ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestNutanixKubernetes125ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestNutanixKubernetes126ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
func TestNutanixKubernetes127ThreeWorkersConformanceFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runConformanceFlow(test)
}
| 326 |
eks-anywhere | aws | Go | // nolint
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
const (
EksaPackageControllerHelmChartName = "eks-anywhere-packages"
EksaPackagesSourceRegistry = "public.ecr.aws/l0g8r8j6"
EksaPackageControllerHelmURI = "oci://" + EksaPackagesSourceRegistry + "/eks-anywhere-packages"
EksaPackageControllerHelmVersion = "0.2.20-eks-a-v0.0.0-dev-build.4894"
EksaPackageBundleURI = "oci://" + EksaPackagesSourceRegistry + "/eks-anywhere-packages-bundles"
EksaPackagesNamespace = "eksa-packages"
clusterNamespace = "test-namespace"
key1 = framework.LabelPrefix + "/" + "key1"
key2 = framework.LabelPrefix + "/" + "key2"
cpKey1 = framework.LabelPrefix + "/" + "cp-key1"
val1 = "val1"
val2 = "val2"
cpVal1 = "cp-val1"
nodeGroupLabel1 = "md-0"
nodeGroupLabel2 = "md-1"
worker0 = "worker-0"
worker1 = "worker-1"
worker2 = "worker-2"
fluxUserProvidedBranch = "testbranch"
fluxUserProvidedNamespace = "testns"
fluxUserProvidedPath = "test/testerson"
vsphereCpVmNumCpuUpdateVar = 4
vsphereCpVmMemoryUpdate = 16384
vsphereCpDiskGiBUpdateVar = 40
vsphereWlVmNumCpuUpdateVar = 4
vsphereWlVmMemoryUpdate = 16384
vsphereWlDiskGiBUpdate = 40
vsphereFolderUpdateVar = "/SDDC-Datacenter/vm/capv/e2eUpdate"
vsphereNetwork2UpdateVar = "/SDDC-Datacenter/network/sddc-cgw-network-2"
vsphereNetwork3UpdateVar = "/SDDC-Datacenter/network/sddc-cgw-network-3"
vsphereInvalidResourcePoolUpdateVar = "*/Resources/INVALID-ResourcePool"
vsphereResourcePoolVar = "T_VSPHERE_RESOURCE_POOL"
)
var (
EksaPackageControllerHelmValues = []string{"sourceRegistry=public.ecr.aws/l0g8r8j6"}
KubeVersions = []v1alpha1.KubernetesVersion{v1alpha1.Kube123, v1alpha1.Kube124, v1alpha1.Kube125, v1alpha1.Kube126, v1alpha1.Kube127}
)
| 52 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/test/framework"
)
func runCuratedPackageInstall(test *framework.ClusterE2ETest) {
test.SetPackageBundleActive()
test.GenerateSupportBundleOnCleanupIfTestFailed()
err := WaitForPackageToBeInstalled(test, context.Background(), "eks-anywhere-packages", 3*time.Minute)
if err != nil {
test.T.Fatalf("packages controller not in installed state: %s", err)
}
err = WaitForPackageToBeInstalled(test, context.Background(), "eks-anywhere-packages-crds", 3*time.Minute)
if err != nil {
test.T.Fatalf("packages controller crds not in installed state: %s", err)
}
packageName := "hello-eks-anywhere"
packagePrefix := "test"
packageFile := test.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace)
test.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ClusterName))
test.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withMgmtCluster(test))
}
func runCuratedPackageInstallSimpleFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runCuratedPackageInstall)
}
func runDisabledCuratedPackage(test *framework.ClusterE2ETest) {
test.ValidatingNoPackageController()
}
func runDisabledCuratedPackageInstallSimpleFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runDisabledCuratedPackage)
}
func runCuratedPackageRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2ETest) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(e *framework.WorkloadCluster) {
e.GenerateClusterConfig()
e.CreateCluster()
e.VerifyPackageControllerNotInstalled()
test.ManagementCluster.SetPackageBundleActive()
packageName := "hello-eks-anywhere"
packagePrefix := "test"
packageFile := e.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace)
test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName))
e.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withMgmtCluster(test.ManagementCluster))
e.DeleteCluster()
})
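// Give the workload cluster deletion above time to settle before deleting the
// management cluster (a best guess at intent; the original leaves the sleep
// uncommented).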
time.Sleep(5 * time.Minute)
test.DeleteManagementCluster()
}
func runCuratedPackageInstallTinkerbellSingleNodeFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOnHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneNoTaints, framework.ValidateControlPlaneLabels)
runCuratedPackageInstall(test)
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
type resourcePredicate func(string, error) bool
func NoErrorPredicate(_ string, err error) bool {
return err == nil
}
// TODO: turn these predicates into generics using comparable once Go 1.18 is allowed
func StringMatchPredicate(s string) resourcePredicate {
return func(in string, err error) bool {
return err == nil && strings.Compare(s, in) == 0
}
}
func IntEqualPredicate(i int) resourcePredicate {
return func(in string, err error) bool {
inInt, err := strconv.Atoi(in)
return err == nil && inInt == i
}
}
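// Note: WaitForResource below ANDs all supplied predicates over the trimmed
// kubectl output, so callers can compose conditions, e.g. NoErrorPredicate
// plus StringMatchPredicate("installed") to require both a clean kubectl exit
// and a specific field value.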
func WaitForResource(
test *framework.ClusterE2ETest,
ctx context.Context,
resource string,
namespace string,
jsonpath string,
timeout time.Duration,
predicates ...resourcePredicate,
) error {
end := time.Now().Add(timeout)
if !strings.HasPrefix(jsonpath, "jsonpath") {
jsonpath = fmt.Sprintf("jsonpath='%s'", jsonpath)
}
for time.Now().Before(end) {
out, err := test.KubectlClient.Execute(ctx, "get", "-n", namespace,
"--kubeconfig="+kubeconfig.FromClusterName(test.ClusterName),
"-o", jsonpath, resource)
outStr := out.String()
trimmed := strings.Trim(outStr, "'")
allPredicates := true
for _, f := range predicates {
allPredicates = allPredicates && f(trimmed, err)
}
if allPredicates {
return nil
}
time.Sleep(2 * time.Second)
}
return fmt.Errorf(
"timed out waiting for resource: %s [namespace: %s, jsonpath: %s, timeout: %s]",
resource,
namespace,
jsonpath,
timeout,
)
}
func WaitForDaemonset(
test *framework.ClusterE2ETest,
ctx context.Context,
daemonsetName string,
namespace string,
numberOfNodes int,
timeout time.Duration,
) error {
return WaitForResource(
test,
ctx,
"daemonset/"+daemonsetName,
namespace,
"{.status.numberAvailable}",
timeout,
NoErrorPredicate,
IntEqualPredicate(numberOfNodes),
)
}
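// WaitForDeployment is an illustrative sketch in the same style as
// WaitForDaemonset above; it is not referenced by any test in this suite, and
// the helper name and the use of .status.readyReplicas are assumptions, not
// part of the original code.
func WaitForDeployment(
test *framework.ClusterE2ETest,
ctx context.Context,
deploymentName string,
namespace string,
readyReplicas int,
timeout time.Duration,
) error {
// Reuse the generic poller: require a clean kubectl exit and the expected
// number of ready replicas.
return WaitForResource(
test,
ctx,
"deployment/"+deploymentName,
namespace,
"{.status.readyReplicas}",
timeout,
NoErrorPredicate,
IntEqualPredicate(readyReplicas),
)
}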
// Hackish way to get the latest bundle. This assumes no bundle is created outside of the normal PBC bundle fetch timer.
// This should be modified to get the bundle from the previous build step and use that only.
func WaitForLatestBundleToBeAvailable(
test *framework.ClusterE2ETest,
ctx context.Context,
timeout time.Duration,
) error {
return WaitForResource(test, ctx, "packagebundle", "eksa-packages", "{.items[0]}", timeout, NoErrorPredicate)
}
func WaitForPackageToBeInstalled(
test *framework.ClusterE2ETest,
ctx context.Context,
packageName string,
timeout time.Duration,
) error {
//--for=jsonpath isn't supported in v1.22. Update once it's supported
//_, err = test.KubectlClient.Execute(
// ctx, "wait", "--timeout", "1m",
// "--for", "jsonpath='{.status.state}'=installed",
// "package", packagePrefix, "--kubeconfig", kubeconfig,
// "-n", "eksa-packages",
//)
return WaitForResource(
test,
ctx,
"package/"+packageName,
fmt.Sprintf("%s-%s", EksaPackagesNamespace, test.ClusterName),
"{.status.state}",
timeout,
NoErrorPredicate,
StringMatchPredicate("installed"),
)
}
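// GetLatestBundleFromCluster returns the name of the first packagebundle in
// the eksa-packages namespace, trimming the surrounding single quotes that
// appear in the jsonpath output.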
func GetLatestBundleFromCluster(test *framework.ClusterE2ETest) (string, error) {
bundleBytes, err := test.KubectlClient.ExecuteCommand(
context.Background(),
"get",
"packagebundle",
"-n", "eksa-packages",
"--kubeconfig="+kubeconfig.FromClusterName(test.ClusterName),
"-o", "jsonpath='{.items[0].metadata.name}'",
)
if err != nil {
return "", err
}
bundle := bundleBytes.String()
return strings.Trim(bundle, "'"), nil
}
// packageBundleURI uses a KubernetesVersion argument to complete a package
// bundle URI by adding the appropriate tag.
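// For example, with the bundle URI constants defined above,
// packageBundleURI(v1alpha1.Kube123) evaluates to
// "oci://public.ecr.aws/l0g8r8j6/eks-anywhere-packages-bundles:v1-23-latest".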
func packageBundleURI(version v1alpha1.KubernetesVersion) string {
tag := "v" + strings.Replace(string(version), ".", "-", 1) + "-latest"
return fmt.Sprintf("%s:%s", EksaPackageBundleURI, tag)
}
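// withMgmtCluster builds a types.Cluster describing an existing management
// cluster, pointing at the <name>/<name>-eks-a-cluster.kubeconfig path used by
// these tests.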
func withMgmtCluster(cluster *framework.ClusterE2ETest) *types.Cluster {
return &types.Cluster{
Name: cluster.ClusterName,
KubeconfigFile: filepath.Join(cluster.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", cluster.ClusterName)),
ExistingManagement: true,
}
}
func SetupSimpleMultiCluster(t *testing.T, provider framework.Provider, kubeVersion v1alpha1.KubernetesVersion) *framework.MulticlusterE2ETest {
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(kubeVersion),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(kubeVersion),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
return test
}
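// Illustrative usage (a hypothetical call site, not part of the suite): build
// a two-cluster Docker test on Kubernetes 1.27, ready to be driven by one of
// the multicluster flows:
//
//	test := SetupSimpleMultiCluster(t, framework.NewDocker(t), v1alpha1.Kube127)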
| 252 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
| 5 |
eks-anywhere | aws | Go | //go:build e2e && (docker || all_providers)
// +build e2e
// +build docker all_providers
package e2e
import (
"context"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/test/framework"
)
// Labels
func TestDockerKubernetesLabels(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup(worker0, api.WithCount(2),
api.WithLabel(key1, val2)),
api.WithWorkerNodeGroup(worker1, api.WithCount(1)),
api.WithWorkerNodeGroup(worker2, api.WithCount(1),
api.WithLabel(key2, val2)),
),
)
runLabelsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)),
api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)),
api.WithWorkerNodeGroup(worker2),
api.WithControlPlaneLabel(cpKey1, cpVal1),
),
)
}
// Flux
func TestDockerKubernetes127GithubFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithFluxGithub(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runFluxFlow(test)
}
func TestDockerKubernetes127GitFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runFluxFlow(test)
}
func TestDockerInstallGitFluxDuringUpgrade(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube127,
framework.WithFluxGit(),
framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
)
}
func TestDockerInstallGithubFluxDuringUpgrade(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube126,
framework.WithFluxGithub(api.WithFluxConfigName(framework.DefaultFluxConfigName)),
framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
)
}
// Curated packages
func TestDockerKubernetes123CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestDockerKubernetes124CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestDockerKubernetes125CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestDockerKubernetes126CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestDockerKubernetes127CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestDockerKubernetes123CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestDockerKubernetes124CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestDockerKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestDockerKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestDockerKubernetes127CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestDockerKubernetes123CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestDockerKubernetes124CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestDockerKubernetes125CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestDockerKubernetes126CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestDockerKubernetes127CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestDockerKubernetes123CuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes124CuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes125CuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes126CuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes127CuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes123CuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestDockerKubernetes124CuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestDockerKubernetes125CuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestDockerKubernetes126CuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestDockerKubernetes127CuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestDockerKubernetes123CuratedPackagesDisabled(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues,
&v1alpha1.PackageConfiguration{Disable: true}),
)
runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes124CuratedPackagesDisabled(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues,
&v1alpha1.PackageConfiguration{Disable: true}),
)
runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes125CuratedPackagesDisabled(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues,
&v1alpha1.PackageConfiguration{Disable: true}),
)
runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes126CuratedPackagesDisabled(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues,
&v1alpha1.PackageConfiguration{Disable: true}),
)
runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary
}
func TestDockerKubernetes127CuratedPackagesDisabled(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t, framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues,
&v1alpha1.PackageConfiguration{Disable: true}),
)
runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary
}
func TestDockerCuratedPackagesMetalLB(t *testing.T) {
RunMetalLBDockerTests(t)
}
// AWS IAM Auth
func TestDockerKubernetes123AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runAWSIamAuthFlow(test)
}
func TestDockerKubernetes124AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runAWSIamAuthFlow(test)
}
func TestDockerKubernetes125AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runAWSIamAuthFlow(test)
}
func TestDockerKubernetes126AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runAWSIamAuthFlow(test)
}
func TestDockerKubernetes127AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runAWSIamAuthFlow(test)
}
// Flux
func TestDockerUpgradeWorkloadClusterWithGithubFlux(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithFluxGithub(),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithFluxGithub(),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
),
),
)
runWorkloadClusterFlowWithGitOps(
test,
framework.WithClusterUpgradeGit(
api.WithKubernetesVersion(v1alpha1.Kube125),
api.WithControlPlaneCount(2),
api.WithWorkerNodeCount(2),
),
// Needed to set the DockerDatacenterConfig namespace field to the specified
// value, since the cluster was initially created without it.
provider.WithProviderUpgradeGit(),
)
}
// OIDC
func TestDockerKubernetes123OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runOIDCFlow(test)
}
func TestDockerKubernetes124OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runOIDCFlow(test)
}
func TestDockerKubernetes125OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runOIDCFlow(test)
}
func TestDockerKubernetes126OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runOIDCFlow(test)
}
func TestDockerKubernetes127OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runOIDCFlow(test)
}
// RegistryMirror
func TestDockerKubernetes127RegistryMirrorAndCert(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestDockerKubernetes127AirgappedRegistryMirrorAndCert(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName),
)
runDockerAirgapConfigFlow(test)
}
func TestDockerKubernetes127RegistryMirrorInsecureSkipVerify(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorInsecureSkipVerify(constants.DockerProviderName),
)
runRegistryMirrorConfigFlow(test)
}
// Simple flow
func TestDockerKubernetes123SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestDockerKubernetes124SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestDockerKubernetes125SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleFlow(test)
}
func TestDockerKubernetes126SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleFlow(test)
}
func TestDockerKubernetes127SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
// Stacked etcd
func TestDockerKubernetesStackedEtcd(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewDocker(t),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
// Taints
func TestDockerKubernetes127Taints(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoScheduleTaint()), api.WithCount(2)),
api.WithWorkerNodeGroup(worker1, api.WithCount(1)),
api.WithWorkerNodeGroup(worker2, api.WithTaint(framework.PreferNoScheduleTaint()), api.WithCount(1)),
),
)
runTaintsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker2, api.WithNoTaints()),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
// Upgrade
func TestDockerKubernetes126To127StackedEtcdUpgrade(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithStackedEtcdTopology()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
}
func TestDockerKubernetes126To127ExternalEtcdUpgrade(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
}
func TestDockerKubernetes124UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
)
}
func TestDockerKubernetes123to124UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
}
func TestDockerKubernetes124to125UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube125,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
}
func TestDockerKubernetes125to126UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube126,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
}
func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
}
func TestDockerKubernetes126to127GithubFluxEnabledUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
test := framework.NewClusterE2ETest(
t,
framework.NewDocker(t),
framework.WithFluxGithub(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeWithFluxFromReleaseFlow(
test,
release,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
}
func TestDockerKubernetes126ManagementClusterUpgradeFromLatestSideEffects(t *testing.T) {
provider := framework.NewDocker(t)
runTestManagementClusterUpgradeSideEffects(t, provider, "", v1alpha1.Kube126)
}
func TestDockerKubernetes124UpgradeAndRemoveWorkerNodeGroupsAPI(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(
t, provider,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
provider.WithNewWorkerNodeGroup("", framework.WithWorkerNodeGroup("worker-1", api.WithCount(2))),
provider.WithNewWorkerNodeGroup("", framework.WithWorkerNodeGroup("worker-2", api.WithCount(1))),
provider.WithNewWorkerNodeGroup(
"", framework.WithWorkerNodeGroup("worker-3", api.WithCount(1), api.WithLabel("tier", "frontend")),
),
)
runUpgradeFlowWithAPI(
test,
api.ClusterToConfigFiller(
api.RemoveWorkerNodeGroup("worker-2"),
api.WithWorkerNodeGroup("worker-1", api.WithCount(1)),
api.RemoveWorkerNodeGroup("worker-3"),
api.WithWorkerNodeGroup("worker-3", api.WithCount(1), api.WithTaint(framework.NoScheduleTaint())),
),
provider.WithNewWorkerNodeGroup("", framework.WithWorkerNodeGroup("worker-4", api.WithCount(1))),
)
}
// Workload Cluster API
func TestDockerUpgradeKubernetes123to124WorkloadClusterScaleupAPI(t *testing.T) {
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup("worker-0", api.WithCount(1)),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterUpgradeFlowAPI(
test,
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(3),
api.WithWorkerNodeGroup("worker-0", api.WithCount(2)),
),
)
}
func TestDockerUpgradeWorkloadClusterLabelsAndTaintsAPI(t *testing.T) {
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithLabel("tier", "frontend"), api.WithTaint(framework.NoScheduleTaint())),
api.WithWorkerNodeGroup("worker-1", api.WithCount(1)),
api.WithWorkerNodeGroup("worker-2", api.WithCount(1), api.WithTaint(framework.PreferNoScheduleTaint())),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterUpgradeFlowAPI(
test,
api.ClusterToConfigFiller(
api.WithControlPlaneLabel("cpKey1", "cpVal1"),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
api.RemoveWorkerNodeGroup("worker-0"),
api.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithLabel("key1", "val1"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-1", api.WithLabel("key2", "val2"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-2", api.WithNoTaints()),
),
)
}
func TestDockerUpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsAPI(t *testing.T) {
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup("worker-0", api.WithCount(2)),
api.WithWorkerNodeGroup("worker-1", api.WithCount(1)),
api.WithWorkerNodeGroup("worker-2", api.WithCount(1)),
api.WithExternalEtcdTopology(1),
),
),
)
runWorkloadClusterUpgradeFlowAPI(
test,
api.ClusterToConfigFiller(
api.WithControlPlaneCount(3),
api.WithWorkerNodeGroup("worker-0", api.WithCount(1)),
api.WithWorkerNodeGroup("worker-1", api.WithCount(2)),
api.RemoveWorkerNodeGroup("worker-2"),
api.WithWorkerNodeGroup("worker-3", api.WithCount(1)),
),
)
}
func TestDockerKubernetes123to124UpgradeFromLatestMinorReleaseAPI(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider,
)
managementCluster.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
managementCluster.UpdateClusterConfig(api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
))
test := framework.NewMulticlusterE2ETest(t, managementCluster)
wc := framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
)
wc.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
wc.UpdateClusterConfig(api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
))
test.WithWorkloadClusters(wc)
runMulticlusterUpgradeFromReleaseFlowAPI(
test,
release,
api.ClusterToConfigFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
}
func TestDockerUpgradeKubernetes123to124WorkloadClusterScaleupGitHubFluxAPI(t *testing.T) {
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup("worker-0", api.WithCount(1)),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterUpgradeFlowAPIWithFlux(
test,
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(3),
api.WithWorkerNodeGroup("worker-0", api.WithCount(2)),
),
)
}
func TestDockerUpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t *testing.T) {
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithLabel("tier", "frontend"), api.WithTaint(framework.NoScheduleTaint())),
api.WithWorkerNodeGroup("worker-1", api.WithCount(1)),
api.WithWorkerNodeGroup("worker-2", api.WithCount(1), api.WithTaint(framework.PreferNoScheduleTaint())),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterUpgradeFlowAPIWithFlux(
test,
api.ClusterToConfigFiller(
api.WithControlPlaneLabel("cpKey1", "cpVal1"),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
api.RemoveWorkerNodeGroup("worker-0"),
api.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithLabel("key1", "val1"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-1", api.WithLabel("key2", "val2"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-2", api.WithNoTaints()),
),
)
}
func TestDockerUpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsGitHubFluxAPI(t *testing.T) {
provider := framework.NewDocker(t)
managementCluster := framework.NewClusterE2ETest(
t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
framework.WithFluxGithubConfig(
api.WithClusterConfigPath("test"),
api.WithBranch("docker"),
),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup("worker-0", api.WithCount(2)),
api.WithWorkerNodeGroup("worker-1", api.WithCount(1)),
api.WithWorkerNodeGroup("worker-2", api.WithCount(1)),
api.WithExternalEtcdTopology(1),
),
),
)
runWorkloadClusterUpgradeFlowAPIWithFlux(
test,
api.ClusterToConfigFiller(
api.WithControlPlaneCount(3),
api.WithWorkerNodeGroup("worker-0", api.WithCount(1)),
api.WithWorkerNodeGroup("worker-1", api.WithCount(2)),
api.RemoveWorkerNodeGroup("worker-2"),
api.WithWorkerNodeGroup("worker-3", api.WithCount(1)),
),
)
}
func TestDockerCiliumSkipUpgrade_CLICreate(t *testing.T) {
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(t, provider,
framework.WithClusterFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
api.WithCiliumSkipUpgrade(),
),
)
test.ValidateCiliumCLIAvailable()
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateClusterState()
test.ValidateEKSACiliumInstalled()
test.DeleteCluster()
}
func TestDockerCiliumSkipUpgrade_CLIUpgrade(t *testing.T) {
release, err := framework.GetLatestMinorReleaseFromTestBranch()
if err != nil {
t.Fatal(err)
}
ver, err := semver.New(release.Version)
if err != nil {
t.Fatal(err)
}
previousRelease, err := framework.GetPreviousMinorReleaseFromVersion(ver)
if err != nil {
t.Fatal(err)
}
provider := framework.NewDocker(t)
test := framework.NewClusterE2ETest(t, provider,
framework.WithClusterFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
)
test.ValidateCiliumCLIAvailable()
test.GenerateClusterConfig(framework.ExecuteWithEksaRelease(previousRelease))
test.CreateCluster(framework.ExecuteWithEksaRelease(previousRelease))
test.ReplaceCiliumWithOSSCilium()
test.UpgradeClusterWithNewConfig(
[]framework.ClusterE2ETestOpt{
framework.WithClusterUpgrade(api.WithCiliumSkipUpgrade()),
},
)
test.ValidateClusterState()
test.ValidateEKSACiliumNotInstalled()
test.DeleteCluster()
}
func TestDockerCiliumSkipUpgrade_ControllerCreate(t *testing.T) {
provider := framework.NewDocker(t)
management := framework.NewClusterE2ETest(t, provider).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
)
management.ValidateCiliumCLIAvailable()
test := framework.NewMulticlusterE2ETest(t, management)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(management.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
api.WithCiliumSkipUpgrade(),
),
),
)
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
client, err := wc.BuildWorkloadClusterClient()
if err != nil {
wc.T.Fatalf("Error creating workload cluster client: %v", err)
}
framework.AwaitCiliumDaemonSetReady(context.Background(), client, 20, 5*time.Second)
wc.DeleteClusterWithKubectl()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestDockerCiliumSkipUpgrade_ControllerUpgrade(t *testing.T) {
provider := framework.NewDocker(t)
management := framework.NewClusterE2ETest(t, provider).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
)
management.ValidateCiliumCLIAvailable()
test := framework.NewMulticlusterE2ETest(t, management)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(management.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
client, err := wc.BuildWorkloadClusterClient()
if err != nil {
wc.T.Fatalf("Error creating workload cluster client: %v", err)
}
// Wait for Cilium to come up.
framework.AwaitCiliumDaemonSetReady(context.Background(), client, 20, 5*time.Second)
// Skip Cilium upgrades and reapply the kubeconfig.
wc.UpdateClusterConfig(api.ClusterToConfigFiller(api.WithCiliumSkipUpgrade()))
wc.ApplyClusterManifest()
// Give some time for reconciliation to happen.
time.Sleep(15 * time.Second)
// Validate EKSA Cilium is still installed because we haven't done anything to it yet
// and the controller shouldn't have removed it.
framework.AwaitCiliumDaemonSetReady(context.Background(), client, 20, 5*time.Second)
// Introduce a different OSS Cilium, wait for it to come up, and validate that
// the controller doesn't try to override the new Cilium.
wc.ReplaceCiliumWithOSSCilium()
wc.ValidateClusterState()
wc.ValidateEKSACiliumNotInstalled()
wc.DeleteClusterWithKubectl()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
| 1,323 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
func runDownloadArtifactsFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.DownloadArtifacts()
}
| 14 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"time"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/test/framework"
)
const (
emissaryPackageName = "emissary"
emissaryPackagePrefix = "test"
)
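// runCuratedPackageEmissaryInstall activates the package bundle, installs the
// Emissary curated package, and verifies the installation; the routing check
// runs only on the Docker provider. The installed package name joins the
// prefix and package name, e.g. "test-emissary".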
func runCuratedPackageEmissaryInstall(test *framework.ClusterE2ETest) {
test.SetPackageBundleActive()
packageFile := test.BuildPackageConfigFile(emissaryPackageName, emissaryPackagePrefix, EksaPackagesNamespace)
test.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ClusterName))
test.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test))
if test.Provider.Name() == constants.DockerProviderName {
test.TestEmissaryPackageRouting(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test))
}
}
func runCuratedPackageEmissaryInstallSimpleFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runCuratedPackageEmissaryInstall)
}
func runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2ETest) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(e *framework.WorkloadCluster) {
e.GenerateClusterConfig()
e.CreateCluster()
e.VerifyPackageControllerNotInstalled()
test.ManagementCluster.SetPackageBundleActive()
packageFile := e.BuildPackageConfigFile(emissaryPackageName, emissaryPackagePrefix, EksaPackagesNamespace)
test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName))
e.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test.ManagementCluster))
e.DeleteCluster()
})
time.Sleep(5 * time.Minute)
test.DeleteManagementCluster()
}
func runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOnHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneNoTaints, framework.ValidateControlPlaneLabels)
runCuratedPackageEmissaryInstall(test)
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
| 60 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
func runFluxFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateFlux()
test.StopIfFailed()
test.DeleteCluster()
}
func runUpgradeFlowWithFlux(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.ValidateFlux()
test.StopIfFailed()
test.DeleteCluster()
}
| 28 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/test/framework"
)
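// runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner installs the
// Harbor curated package with every persistent volume claim bound to the
// local-path storage class created by InstallLocalStorageProvisioner, so the
// test needs no external storage driver.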
func runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test *framework.ClusterE2ETest) {
test.WithCluster(func(test *framework.ClusterE2ETest) {
test.InstallLocalStorageProvisioner()
packagePrefix := "test"
installNs := "harbor"
test.CreateNamespace(installNs)
test.InstallCuratedPackage("harbor", packagePrefix, kubeconfig.FromClusterName(test.ClusterName),
"--set secretKey=use-a-secret-key",
"--set expose.tls.certSource=auto",
"--set expose.tls.auto.commonName=localhost",
"--set persistence.persistentVolumeClaim.registry.storageClass=local-path",
"--set persistence.persistentVolumeClaim.jobservice.jobLog.storageClass=local-path",
"--set persistence.persistentVolumeClaim.database.storageClass=local-path",
"--set persistence.persistentVolumeClaim.redis.storageClass=local-path",
"--set persistence.persistentVolumeClaim.trivy.storageClass=local-path",
)
test.VerifyHarborPackageInstalled(packagePrefix, installNs)
})
}
| 31 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
func runNTPFlow(test *framework.ClusterE2ETest, osFamily v1alpha1.OSFamily) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateNTPConfig(osFamily)
test.DeleteCluster()
}
func runBottlerocketConfigurationFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateBottlerocketKubernetesSettings()
test.DeleteCluster()
}
| 24 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
func runLabelsUpgradeFlow(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateWorkerNodes(framework.ValidateWorkerNodeLabels)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneLabels)
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.ValidateWorkerNodes(framework.ValidateWorkerNodeLabels)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneLabels)
test.StopIfFailed()
test.DeleteCluster()
}
| 23 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/suite"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/test/framework"
)
type MetalLBSuite struct {
suite.Suite
cluster *framework.ClusterE2ETest
kubernetesVersion v1alpha1.KubernetesVersion
provider framework.Provider
}
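// RunMetalLBDockerTests runs the MetalLB suite once per supported Kubernetes
// version, setting a distinct cluster-prefix environment variable on each
// iteration so the per-version clusters don't collide.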
func RunMetalLBDockerTests(t *testing.T) {
for i, v := range KubeVersions {
s := new(MetalLBSuite)
s.provider = framework.NewDocker(t)
s.kubernetesVersion = v
os.Setenv(framework.ClusterPrefixVar, fmt.Sprintf("%s-%d", EksaPackagesNamespace, i))
suite.Run(t, s)
}
}
func (suite *MetalLBSuite) SetupSuite() {
t := suite.T()
suite.cluster = framework.NewClusterE2ETest(t,
suite.provider,
framework.WithClusterFiller(api.WithKubernetesVersion(suite.kubernetesVersion)),
framework.WithPackageConfig(t, packageBundleURI(suite.kubernetesVersion),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
kubeVersionNameDiscriminator(suite.kubernetesVersion),
)
}
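// kubeVersionNameDiscriminator appends the Kubernetes version, with dots
// replaced by dashes, to the generated cluster name so per-version suite runs
// don't collide; e.g. version "1.27" turns "metallb-test" into
// "metallb-test-1-27" (the base name here is illustrative).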
func kubeVersionNameDiscriminator(version v1alpha1.KubernetesVersion) framework.ClusterE2ETestOpt {
return func(e *framework.ClusterE2ETest) {
e.ClusterName = fmt.Sprintf("%s-%s", e.ClusterName, strings.ReplaceAll(string(version), ".", "-"))
}
}
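// getIPAddressPoolSpec renders the JSON spec the test expects on a MetalLB
// IPAddressPool resource, for example:
//
//	getIPAddressPoolSpec([]string{"10.0.0.1/32"}, true)
//	// {"addresses":["10.0.0.1/32"],"autoAssign":true,"avoidBuggyIPs":false}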
func getIPAddressPoolSpec(addresses []string, autoAssign bool) string {
aList, _ := json.Marshal(addresses)
return fmt.Sprintf(`{"addresses":%s,"autoAssign":%s,"avoidBuggyIPs":false}`, aList, strconv.FormatBool(autoAssign))
}
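// getL2AdvertisementSpec renders the expected L2Advertisement spec, e.g.
//
//	getL2AdvertisementSpec([]string{"default"})
//	// {"ipAddressPools":["default"]}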
func getL2AdvertisementSpec(ipPoolNames []string) string {
pools, _ := json.Marshal(ipPoolNames)
return fmt.Sprintf(`{"ipAddressPools":%s}`, pools)
}
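// getBGPAdvertisementSpec renders the expected BGPAdvertisement spec with the
// fixed aggregation lengths and localPref this suite configures, e.g.
//
//	getBGPAdvertisementSpec([]string{"bgp"})
//	// {"aggregationLength":32,"aggregationLengthV6":32,"ipAddressPools":["bgp"],"localPref":123}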
func getBGPAdvertisementSpec(ipPoolNames []string) string {
pools, _ := json.Marshal(ipPoolNames)
return fmt.Sprintf(`{"aggregationLength":32,"aggregationLengthV6":32,"ipAddressPools":%s,"localPref":123}`, pools)
}
func (suite *MetalLBSuite) TestPackagesMetalLB() {
// This should be split into multiple tests with the cluster set up in `SetupSuite`.
// That, however, requires utilities for managing cluster creation.
t := suite.T()
suite.cluster.WithCluster(func(test *framework.ClusterE2ETest) {
err := WaitForPackageToBeInstalled(test, context.Background(), "eks-anywhere-packages", 3*time.Minute)
if err != nil {
test.T.Fatalf("packages controller not in installed state: %s", err)
}
err = WaitForPackageToBeInstalled(test, context.Background(), "eks-anywhere-packages-crds", 3*time.Minute)
if err != nil {
test.T.Fatalf("packages controller crds not in installed state: %s", err)
}
kcfg := kubeconfig.FromClusterName(test.ClusterName)
cluster := suite.cluster.Cluster()
ctx := context.Background()
namespace := "metallb-system"
test.CreateNamespace(namespace)
packageName := "metallb"
packageCrdName := "metallb-crds"
packagePrefix := "test"
test.SetPackageBundleActive()
t.Run("Basic installation", func(t *testing.T) {
t.Cleanup(func() {
test.UninstallCuratedPackage(packagePrefix)
test.UninstallCuratedPackage(packageCrdName)
})
test.InstallCuratedPackage(packageName, packagePrefix, kcfg)
err := WaitForPackageToBeInstalled(test, ctx, packagePrefix, 120*time.Second)
if err != nil {
t.Fatalf("waiting for metallb package to be installed: %s", err)
}
err = test.KubectlClient.WaitForDeployment(ctx,
cluster, "5m", "Available", "test-metallb-controller", namespace)
if err != nil {
t.Fatalf("waiting for metallb controller deployment to be available: %s", err)
}
err = WaitForDaemonset(test, ctx, "test-metallb-speaker", namespace, 2, 5*time.Minute)
if err != nil {
t.Fatalf("waiting for metallb speaker daemonset to be available: %s", err)
}
})
t.Run("Address pool configuration", func(t *testing.T) {
ip := "10.100.100.1"
ipSub := ip + "/32"
t.Cleanup(func() {
test.UninstallCuratedPackage(packagePrefix)
test.UninstallCuratedPackage(packageCrdName)
})
test.CreateResource(ctx, fmt.Sprintf(
`
apiVersion: packages.eks.amazonaws.com/v1alpha1
kind: Package
metadata:
name: test
namespace: eksa-packages-%s
spec:
packageName: metallb
config: |
IPAddressPools:
- name: default
addresses:
- %s
L2Advertisements:
- ipAddressPools:
- default
`, test.ClusterName, ipSub))
err := WaitForPackageToBeInstalled(test, ctx, packagePrefix, 120*time.Second)
if err != nil {
t.Fatalf("waiting for metallb package to be installed: %s", err)
}
err = test.KubectlClient.WaitForDeployment(ctx,
cluster, "5m", "Available", "test-metallb-controller", namespace)
if err != nil {
t.Fatalf("waiting for metallb controller deployment to be available: %s", err)
}
err = WaitForDaemonset(test, ctx, "test-metallb-speaker", namespace, 2, 5*time.Minute)
if err != nil {
t.Fatalf("waiting for metallb speaker daemonset to be available: %s", err)
}
expectedAddressPool := getIPAddressPoolSpec([]string{ipSub}, true)
err = WaitForResource(
test,
ctx,
"ipaddresspools.metallb.io/default",
namespace,
"{.spec}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(expectedAddressPool),
)
if err != nil {
t.Fatal(err)
}
expectedAdvertisement := getL2AdvertisementSpec([]string{"default"})
err = WaitForResource(
test,
ctx,
"l2advertisements.metallb.io/l2adv-0",
namespace,
"{.spec}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(expectedAdvertisement),
)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
opts := &kubernetes.KubectlDeleteOptions{
Name: "my-service",
Namespace: "default",
}
test.KubectlClient.Delete(ctx, "service", kubeconfig.FromClusterName(test.ClusterName), opts)
})
test.CreateResource(ctx, `
apiVersion: v1
kind: Service
metadata:
name: my-service
spec:
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 9376
`)
err = WaitForResource(
test,
ctx,
"service/my-service",
"default",
"{.status.loadBalancer.ingress[0].ip}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(ip),
)
if err != nil {
t.Fatal(err)
}
})
t.Run("BGP configuration", func(t *testing.T) {
ip := "10.100.100.2"
ipSub := ip + "/32"
ipTwo := "10.100.0.1"
ipTwoSub := ipTwo + "/32"
t.Cleanup(func() {
test.UninstallCuratedPackage(packagePrefix)
test.UninstallCuratedPackage(packageCrdName)
})
test.CreateResource(ctx, fmt.Sprintf(
`
apiVersion: packages.eks.amazonaws.com/v1alpha1
kind: Package
metadata:
name: test
namespace: eksa-packages-%s
spec:
packageName: metallb
config: |
IPAddressPools:
- name: default
addresses:
- %s
autoAssign: false
- name: bgp
addresses:
- %s
L2Advertisements:
- ipAddressPools:
- default
BGPAdvertisements:
- ipAddressPools:
- bgp
localPref: 123
aggregationLength: 32
aggregationLengthV6: 32
BGPPeers:
- myASN: 123
peerASN: 55001
peerAddress: 12.2.4.2
keepaliveTime: 30s
`, test.ClusterName, ipTwoSub, ipSub))
err := WaitForPackageToBeInstalled(test, ctx, packagePrefix, 120*time.Second)
if err != nil {
t.Fatalf("waiting for metallb package to be installed: %s", err)
}
err = test.KubectlClient.WaitForDeployment(ctx,
cluster, "5m", "Available", "test-metallb-controller", namespace)
if err != nil {
t.Fatalf("waiting for metallb controller deployment to be available: %s", err)
}
err = WaitForDaemonset(test, ctx, "test-metallb-speaker", namespace, 2, 5*time.Minute)
if err != nil {
t.Fatalf("waiting for metallb speaker daemonset to be available: %s", err)
}
expectedAddressPool := getIPAddressPoolSpec([]string{ipTwoSub}, false)
err = WaitForResource(
test,
ctx,
"ipaddresspools.metallb.io/default",
namespace,
"{.spec}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(expectedAddressPool),
)
if err != nil {
t.Fatal(err)
}
expectedAddressPool = getIPAddressPoolSpec([]string{ipSub}, true)
err = WaitForResource(
test,
ctx,
"ipaddresspools.metallb.io/bgp",
namespace,
"{.spec}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(expectedAddressPool),
)
if err != nil {
t.Fatal(err)
}
expectedBGPAdv := getBGPAdvertisementSpec([]string{"bgp"})
err = WaitForResource(
test,
ctx,
"bgpadvertisements.metallb.io/bgpadv-0",
namespace,
"{.spec}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(expectedBGPAdv),
)
if err != nil {
t.Fatal(err)
}
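// peerPort is omitted from the package config above; the expected spec carries
// MetalLB's default of 179, the standard BGP port.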
expectedBGPPeer := `{"keepaliveTime":"30s","myASN":123,"peerASN":55001,"peerAddress":"12.2.4.2","peerPort":179}`
err = WaitForResource(
test,
ctx,
"bgppeers.metallb.io/bgppeer-0",
namespace,
"{.spec}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(expectedBGPPeer),
)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
opts := &kubernetes.KubectlDeleteOptions{
Name: "my-service",
Namespace: "default",
}
test.KubectlClient.Delete(ctx, "service", kubeconfig.FromClusterName(test.ClusterName), opts)
})
test.CreateResource(ctx, `
apiVersion: v1
kind: Service
metadata:
name: my-service
spec:
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 9376
`)
err = WaitForResource(
test,
ctx,
"service/my-service",
"default",
"{.status.loadBalancer.ingress[0].ip}",
20*time.Second,
NoErrorPredicate,
StringMatchPredicate(ip),
)
if err != nil {
t.Fatal(err)
}
})
})
}
| 374 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"time"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
func runWorkloadClusterFlow(test *framework.MulticlusterE2ETest) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.GenerateClusterConfig()
w.CreateCluster()
w.DeleteCluster()
})
time.Sleep(5 * time.Minute)
test.DeleteManagementCluster()
}
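// runWorkloadClusterPrevVersionCreateFlow creates and deletes workload
// clusters pinned to a previous EKS-A release under a management cluster
// running the version under test. A caller typically resolves the release
// first; a minimal sketch (assuming the usual framework helpers):
//
//	release, err := framework.GetLatestMinorReleaseFromTestBranch()
//	if err != nil {
//		t.Fatal(err)
//	}
//	runWorkloadClusterPrevVersionCreateFlow(test, release)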
func runWorkloadClusterPrevVersionCreateFlow(test *framework.MulticlusterE2ETest, latestMinorRelease *releasev1.EksARelease) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.GenerateClusterConfigForVersion(latestMinorRelease.Version, framework.ExecuteWithEksaRelease(latestMinorRelease))
w.CreateCluster(framework.ExecuteWithEksaRelease(latestMinorRelease))
w.DeleteCluster()
})
test.DeleteManagementCluster()
}
func runWorkloadClusterFlowWithGitOps(test *framework.MulticlusterE2ETest, clusterOpts ...framework.ClusterE2ETestOpt) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.GenerateClusterConfig()
w.CreateCluster()
w.UpgradeWithGitOps(clusterOpts...)
time.Sleep(5 * time.Minute)
w.DeleteCluster()
})
time.Sleep(5 * time.Minute)
test.DeleteManagementCluster()
}
func runWorkloadClusterGitOpsAPIFlowForBareMetal(test *framework.MulticlusterE2ETest) {
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.WaitForAvailableHardware()
w.PowerOffHardware()
test.PushWorkloadClusterToGit(w)
w.WaitForKubeconfig()
w.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(w)
w.ValidateClusterDelete()
w.ValidateHardwareDecommissioned()
})
test.DeleteManagementCluster()
}
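// runWorkloadClusterGitOpsAPIUpgradeFlowForBareMetal pushes the workload
// cluster spec to Git twice: once to create the cluster and once with the
// supplied fillers applied as an upgrade, validating cluster state after each
// reconciliation before deleting the cluster through Git.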
func runWorkloadClusterGitOpsAPIUpgradeFlowForBareMetal(test *framework.MulticlusterE2ETest, filler ...api.ClusterConfigFiller) {
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.WaitForAvailableHardware()
w.PowerOffHardware()
test.PushWorkloadClusterToGit(w)
w.WaitForKubeconfig()
w.ValidateClusterState()
test.PushWorkloadClusterToGit(w, filler...)
w.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(w)
w.ValidateClusterDelete()
w.ValidateHardwareDecommissioned()
})
test.DeleteManagementCluster()
}
func runTinkerbellWorkloadClusterFlow(test *framework.MulticlusterE2ETest) {
test.ManagementCluster.GenerateClusterConfig()
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.GenerateClusterConfig()
w.PowerOffHardware()
w.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
w.StopIfFailed()
w.DeleteCluster()
w.ValidateHardwareDecommissioned()
})
test.DeleteTinkerbellManagementCluster()
}
func runWorkloadClusterWithAPIFlowForBareMetal(test *framework.MulticlusterE2ETest) {
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.WaitForAvailableHardware()
w.PowerOffHardware()
w.ApplyClusterManifest()
w.WaitForKubeconfig()
w.ValidateClusterState()
w.DeleteClusterWithKubectl()
w.ValidateClusterDelete()
w.ValidateHardwareDecommissioned()
})
test.DeleteTinkerbellManagementCluster()
}
func runSimpleWorkloadUpgradeFlowForBareMetal(test *framework.MulticlusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.ManagementCluster.GenerateClusterConfig()
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.GenerateClusterConfig()
w.PowerOffHardware()
w.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
time.Sleep(2 * time.Minute)
w.UpgradeCluster(clusterOpts)
time.Sleep(2 * time.Minute)
w.ValidateCluster(updateVersion)
w.StopIfFailed()
w.DeleteCluster()
w.ValidateHardwareDecommissioned()
})
test.DeleteManagementCluster()
}
func runWorkloadClusterUpgradeFlowWithAPIForBareMetal(test *framework.MulticlusterE2ETest, filler ...api.ClusterConfigFiller) {
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.WaitForAvailableHardware()
w.PowerOffHardware()
w.ApplyClusterManifest()
w.WaitForKubeconfig()
w.ValidateClusterState()
w.UpdateClusterConfig(filler...)
w.ApplyClusterManifest()
w.ValidateClusterState()
w.DeleteClusterWithKubectl()
w.ValidateClusterDelete()
w.ValidateHardwareDecommissioned()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
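// runTinkerbellWorkloadClusterFlowSkipPowerActions drives hardware power state
// from the test itself (power off, PXE boot, power on) instead of relying on
// provisioner power actions, then tears down the workload and management
// clusters along with their hardware.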
func runTinkerbellWorkloadClusterFlowSkipPowerActions(test *framework.MulticlusterE2ETest) {
test.ManagementCluster.GenerateClusterConfig()
test.CreateTinkerbellManagementCluster()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.GenerateClusterConfig()
w.PowerOffHardware()
w.PXEBootHardware()
w.PowerOnHardware()
w.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
w.StopIfFailed()
w.DeleteCluster()
w.PowerOffHardware()
w.ValidateHardwareDecommissioned()
})
test.ManagementCluster.StopIfFailed()
test.ManagementCluster.DeleteCluster()
test.ManagementCluster.PowerOffHardware()
test.ManagementCluster.ValidateHardwareDecommissioned()
}
| 166 |
eks-anywhere | aws | Go | //go:build e2e && (nutanix || all_providers)
// +build e2e
// +build nutanix all_providers
package e2e
import (
"testing"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
// Curated packages
func TestNutanixKubernetes127UbuntuCuratedPackagesSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestNutanixKubernetes127UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestNutanixKubernetes127UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestNutanixKubernetes127UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestNutanixKubernetes127UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestNutanixKubernetes127UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestNutanixKubernetes126UbuntuCuratedPackagesSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestNutanixKubernetes126UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestNutanixKubernetes126UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestNutanixKubernetes126UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestNutanixKubernetes126UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestNutanixKubernetes126UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestNutanixKubernetes125UbuntuCuratedPackagesSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestNutanixKubernetes125UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestNutanixKubernetes125UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestNutanixKubernetes125UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestNutanixKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestNutanixKubernetes125UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestNutanixKubernetes124UbuntuCuratedPackagesSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestNutanixKubernetes124UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestNutanixKubernetes124UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestNutanixKubernetes124UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestNutanixKubernetes124UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestNutanixKubernetes124UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestNutanixKubernetes123UbuntuCuratedPackagesSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestNutanixKubernetes123UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestNutanixKubernetes123UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestNutanixKubernetes123UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallSimpleFlow(test)
}
func TestNutanixKubernetes123UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestNutanixKubernetes123UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
// Simple flow
func TestNutanixKubernetes123SimpleFlowWithName(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes124SimpleFlowWithName(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes125SimpleFlowWithName(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes126SimpleFlowWithName(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes127SimpleFlowWithName(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes123SimpleFlowWithUUID(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu123NutanixUUID(),
framework.WithPrismElementClusterUUID(),
framework.WithNutanixSubnetUUID()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes124SimpleFlowWithUUID(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu124NutanixUUID(),
framework.WithPrismElementClusterUUID(),
framework.WithNutanixSubnetUUID()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes125SimpleFlowWithUUID(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu125NutanixUUID(),
framework.WithPrismElementClusterUUID(),
framework.WithNutanixSubnetUUID()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes126SimpleFlowWithUUID(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu126NutanixUUID(),
framework.WithPrismElementClusterUUID(),
framework.WithNutanixSubnetUUID()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleFlow(test)
}
func TestNutanixKubernetes127SimpleFlowWithUUID(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu127NutanixUUID(),
framework.WithPrismElementClusterUUID(),
framework.WithNutanixSubnetUUID()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
// Upgrade
func TestNutanixKubernetes123To124UbuntuUpgrade(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu123Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(framework.UpdateNutanixUbuntuTemplate124Var()),
)
}
func TestNutanixKubernetes124To125UbuntuUpgrade(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu124Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube125,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)),
provider.WithProviderUpgrade(framework.UpdateNutanixUbuntuTemplate125Var()),
)
}
func TestNutanixKubernetes125To126UbuntuUpgrade(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube126,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
provider.WithProviderUpgrade(framework.UpdateNutanixUbuntuTemplate126Var()),
)
}
func TestNutanixKubernetes126To127UbuntuUpgrade(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(framework.UpdateNutanixUbuntuTemplate127Var()),
)
}
func TestNutanixKubernetes123UbuntuWorkerNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu123Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)),
)
}
func TestNutanixKubernetes124UbuntuWorkerNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu124Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)),
)
}
// 1 worker node cluster scaled up to 3
func TestNutanixKubernetes125UbuntuWorkerNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube125,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)),
)
}
// 1 worker node cluster scaled up to 3
func TestNutanixKubernetes126UbuntuWorkerNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube126,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)),
)
}
// 1 worker node cluster scaled up to 3
func TestNutanixKubernetes127UbuntuWorkerNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu127Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)),
)
}
func TestNutanixKubernetes123UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu123Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithEnvVar("features.NutanixProviderEnvVar", "true"),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
)
}
func TestNutanixKubernetes124UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu124Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithEnvVar("features.NutanixProviderEnvVar", "true"),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
)
}
// 1 node control plane cluster scaled up to 3
func TestNutanixKubernetes125UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube125,
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
)
}
// 1 node control plane cluster scaled up to 3
func TestNutanixKubernetes126UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube126,
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
)
}
// 1 node control plane cluster scaled up to 3
func TestNutanixKubernetes127UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu127Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
)
}
func TestNutanixKubernetes123UbuntuWorkerNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu123Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)),
)
}
func TestNutanixKubernetes124UbuntuWorkerNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu124Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)),
)
}
// 3 worker node cluster scaled down to 1
func TestNutanixKubernetes125UbuntuWorkerNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube125,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)),
)
}
// 3 worker node cluster scaled down to 1
func TestNutanixKubernetes126UbuntuWorkerNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube126,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)),
)
}
// 3 worker node cluster scaled down to 1
func TestNutanixKubernetes127UbuntuWorkerNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu127Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)),
)
}
func TestNutanixKubernetes123UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu123Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube123,
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
}
func TestNutanixKubernetes124UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu124Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
}
// 3 node control plane cluster scaled down to 1
func TestNutanixKubernetes125UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube125,
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
}
// 3 node control plane cluster scaled down to 1
func TestNutanixKubernetes126UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube126,
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
}
// 3 node control plane cluster scaled down to 1
func TestNutanixKubernetes127UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) {
provider := framework.NewNutanix(t, framework.WithUbuntu127Nutanix())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
)
}
// OIDC Tests
func TestNutanixKubernetes123OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestNutanixKubernetes124OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestNutanixKubernetes125OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestNutanixKubernetes126OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestNutanixKubernetes127OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
// IAMAuthenticator Tests
func TestNutanixKubernetes123AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu123Nutanix()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runAWSIamAuthFlow(test)
}
func TestNutanixKubernetes124AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu124Nutanix()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runAWSIamAuthFlow(test)
}
func TestNutanixKubernetes125AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runAWSIamAuthFlow(test)
}
func TestNutanixKubernetes126AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu126Nutanix()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runAWSIamAuthFlow(test)
}
func TestNutanixKubernetes127AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewNutanix(t, framework.WithUbuntu127Nutanix()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runAWSIamAuthFlow(test)
}
| 999 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
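// runOIDCFlow creates a cluster, validates OIDC authentication against it, stops if any
// step failed, and otherwise deletes the cluster.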
func runOIDCFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateOIDC()
test.StopIfFailed()
test.DeleteCluster()
}
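// runTinkerbellOIDCFlow runs the OIDC validation flow on bare metal: it generates the
// hardware config, powers off the hardware, creates the cluster, validates OIDC, and
// verifies the hardware is decommissioned after deletion.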
func runTinkerbellOIDCFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateOIDC()
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
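// runUpgradeFlowWithOIDC creates a cluster with OIDC enabled, validates OIDC before and
// after upgrading it with the given options, and deletes it if all steps succeeded.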
func runUpgradeFlowWithOIDC(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateOIDC()
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.ValidateOIDC()
test.StopIfFailed()
test.DeleteCluster()
}
| 40 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/test/framework"
)
const (
prometheusPackageName = "prometheus"
prometheusPackagePrefix = "generated"
prometheusPackageTargetNamespace = "observability"
)
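// runCuratedPackagesPrometheusInstall installs the curated Prometheus package and verifies
// that the node-exporter and prometheus-server workloads reach the expected state.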
func runCuratedPackagesPrometheusInstall(test *framework.ClusterE2ETest) {
packageFullName := prometheusPackagePrefix + "-" + prometheusPackageName
test.InstallLocalStorageProvisioner()
test.CreateNamespace(prometheusPackageTargetNamespace)
test.SetPackageBundleActive()
test.InstallCuratedPackage(prometheusPackageName, packageFullName,
kubeconfig.FromClusterName(test.ClusterName),
"--set server.persistentVolume.storageClass=local-path")
test.VerifyPrometheusPackageInstalled(packageFullName, prometheusPackageTargetNamespace)
test.VerifyPrometheusNodeExporterStates(packageFullName, prometheusPackageTargetNamespace)
test.VerifyPrometheusPrometheusServerStates(packageFullName, prometheusPackageTargetNamespace, "deployment")
}
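// runCuratedPackagesPrometheusUpdate installs the curated Prometheus package and then
// switches the prometheus-server between statefulset and deployment modes, verifying the
// package and server state after each update.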
func runCuratedPackagesPrometheusUpdate(test *framework.ClusterE2ETest) {
packageFullName := prometheusPackagePrefix + "-" + prometheusPackageName
test.InstallLocalStorageProvisioner()
test.CreateNamespace(prometheusPackageTargetNamespace)
test.SetPackageBundleActive()
test.InstallCuratedPackage(prometheusPackageName, packageFullName,
kubeconfig.FromClusterName(test.ClusterName),
"--set server.persistentVolume.storageClass=local-path")
test.ApplyPrometheusPackageServerStatefulSetFile(packageFullName, prometheusPackageTargetNamespace)
test.VerifyPrometheusPackageInstalled(packageFullName, prometheusPackageTargetNamespace)
test.VerifyPrometheusPrometheusServerStates(packageFullName, prometheusPackageTargetNamespace, "statefulset")
test.VerifyPrometheusNodeExporterStates(packageFullName, prometheusPackageTargetNamespace)
test.ApplyPrometheusPackageServerDeploymentFile(packageFullName, prometheusPackageTargetNamespace)
test.VerifyPrometheusPackageInstalled(packageFullName, prometheusPackageTargetNamespace)
test.VerifyPrometheusPrometheusServerStates(packageFullName, prometheusPackageTargetNamespace, "deployment")
test.VerifyPrometheusNodeExporterStates(packageFullName, prometheusPackageTargetNamespace)
}
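// runCuratedPackagesPrometheusInstallSimpleFlow runs the Prometheus install checks inside
// a temporary cluster managed by WithCluster.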
func runCuratedPackagesPrometheusInstallSimpleFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runCuratedPackagesPrometheusInstall)
}
func runCuratedPackagesPrometheusUpdateFlow(test *framework.ClusterE2ETest) {
test.WithCluster(runCuratedPackagesPrometheusUpdate)
}
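// runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow runs the Prometheus install
// checks on a bare metal cluster and validates hardware decommissioning on teardown.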
func runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOnHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneNoTaints, framework.ValidateControlPlaneLabels)
runCuratedPackagesPrometheusInstall(test)
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
| 70 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
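// runProxyConfigFlow creates and then deletes a cluster that has been configured by the
// caller to use a proxy, verifying that provisioning succeeds behind it.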
func runProxyConfigFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.DeleteCluster()
}
| 15 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
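// runRegistryMirrorConfigFlow downloads and extracts the release artifacts, imports the
// images into the registry mirror, then creates and deletes a cluster using the
// downloaded bundles override.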
func runRegistryMirrorConfigFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.DownloadArtifacts()
test.ExtractDownloadedArtifacts()
test.DownloadImages()
test.ImportImages()
test.CreateCluster(framework.WithBundlesOverride(bundleReleasePathFromArtifacts))
test.DeleteCluster(framework.WithBundlesOverride(bundleReleasePathFromArtifacts))
}
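// runTinkerbellRegistryMirrorFlow runs the registry mirror flow on bare metal,
// additionally validating hardware decommissioning and cleaning up the downloaded
// artifacts and images.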
func runTinkerbellRegistryMirrorFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.DownloadArtifacts()
test.ExtractDownloadedArtifacts()
test.DownloadImages()
test.ImportImages()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithForce(), framework.WithBundlesOverride(bundleReleasePathFromArtifacts))
test.StopIfFailed()
test.DeleteCluster(framework.WithBundlesOverride(bundleReleasePathFromArtifacts))
test.ValidateHardwareDecommissioned()
test.CleanupDownloadedArtifactsAndImages()
}
| 34 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"fmt"
"os"
"testing"
"time"
"github.com/aws/eks-anywhere/internal/pkg/api"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/test/framework"
)
type eksaPackagedBinary interface {
framework.PackagedBinary
// Version returns the EKS-A version.
Version() string
}
// runFlowUpgradeManagementClusterCheckForSideEffects creates a management cluster and workload clusters
// with a specific eks-a version, then upgrades the management cluster with another CLI version
// and checks that this doesn't cause any side effects (machine rollouts) in the workload clusters.
func runFlowUpgradeManagementClusterCheckForSideEffects(test *framework.MulticlusterE2ETest, currentEKSA, newEKSA eksaPackagedBinary) {
test.T.Logf("Creating management cluster with EKS-A version %s", currentEKSA.Version())
test.CreateManagementCluster(framework.ExecuteWithBinary(currentEKSA))
test.T.Logf("Creating workload clusters with EKS-A version %s", currentEKSA.Version())
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.CreateCluster(framework.ExecuteWithBinary(currentEKSA))
})
	waitForWorkloadClustersMachineDeploymentsReady(test.ManagementCluster, test.WorkloadClusters)
preUpgradeWorkloadClustersState := buildWorkloadClustersWithMachines(test.ManagementCluster, test.WorkloadClusters)
test.T.Log("Machines state for workload clusters after first creation")
printStateOfMachines(test.ManagementCluster.ClusterConfig.Cluster, preUpgradeWorkloadClustersState)
test.T.Logf("Upgrading management cluster with EKS-A version %s", newEKSA.Version())
test.ManagementCluster.UpgradeCluster(framework.ExecuteWithBinary(newEKSA))
checker := machineSideEffectChecker{
tb: test.T,
checkDuration: 10 * time.Minute,
waitInBetweenTries: 20 * time.Second,
}
if changed, reason := checker.haveMachinesChanged(test.ManagementCluster, preUpgradeWorkloadClustersState); changed {
		test.T.Fatalf("The new controller caused cascading changes: %s", reason)
}
test.T.Log("Your management cluster upgrade didn't create or delete machines in the workload clusters. Congrats!")
if os.Getenv("MANUAL_TEST_PAUSE") == "true" {
test.T.Log("Press enter to continue with the cleanup after you are done with your manual investigation: ")
fmt.Scanln()
}
test.T.Log("Machines state for workload clusters after management cluster upgrade")
printStateOfMachines(test.ManagementCluster.ClusterConfig.Cluster, buildWorkloadClustersWithMachines(test.ManagementCluster, test.WorkloadClusters))
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
w.DeleteCluster()
})
test.DeleteManagementCluster()
}
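// clusterWithMachines pairs a cluster name with a snapshot of its CAPI machines.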
type clusterWithMachines struct {
name string
machines clusterMachines
}
type clusterMachines map[string]types.Machine
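// anyMachinesChanged compares two snapshots of a cluster's machines and reports whether
// any machine was added or removed, along with a human-readable reason. A minimal usage
// sketch (the before and after snapshots here are hypothetical):
//
//	if changed, reason := anyMachinesChanged(before, after); changed {
//		t.Fatalf("machines changed: %s", reason)
//	}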
func anyMachinesChanged(original clusterMachines, current clusterMachines) (changed bool, reason string) {
if len(original) != len(current) {
return true, fmt.Sprintf("Different number of machines: before %d, after %d", len(original), len(current))
}
for machineName := range original {
		if _, found := current[machineName]; !found {
			return true, fmt.Sprintf("Machine %s not present in current cluster", machineName)
}
}
return false, ""
}
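// printStateOfMachines pretty-prints the workload clusters under a management cluster as
// a tree, listing each machine with its labels and conditions.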
func printStateOfMachines(managementCluster *anywherev1.Cluster, clusters []clusterWithMachines) {
fmt.Println(managementCluster.Name)
for _, cluster := range clusters {
fmt.Printf("├── %s\n", cluster.name)
for _, m := range cluster.machines {
fmt.Printf("│ ├── %s\n", m.Metadata.Name)
fmt.Print("│ │ ├── Labels\n")
for k, v := range m.Metadata.Labels {
fmt.Printf("│ │ │ ├── %s: %s\n", k, v)
}
fmt.Print("│ │ ├── Conditions\n")
for _, c := range m.Status.Conditions {
fmt.Printf("│ │ │ ├── %s: %s\n", c.Type, c.Status)
}
}
}
}
func buildClusterWithMachines(managementCluster *framework.ClusterE2ETest, clusterName string) clusterWithMachines {
managementCluster.T.Logf("Reading CAPI machines for cluster %s", clusterName)
return clusterWithMachines{
name: clusterName,
machines: managementCluster.GetCapiMachinesForCluster(clusterName),
}
}
func buildWorkloadClustersWithMachines(managementCluster *framework.ClusterE2ETest, workloadClusters framework.WorkloadClusters) []clusterWithMachines {
cm := make([]clusterWithMachines, 0, len(workloadClusters))
for _, w := range workloadClusters {
cm = append(cm, buildClusterWithMachines(managementCluster, w.ClusterName))
}
return cm
}
func waitForMachineDeploymentReady(managementCluster *framework.ClusterE2ETest, cluster *anywherev1.Cluster, workerNodeGroup anywherev1.WorkerNodeGroupConfiguration) {
	machineDeploymentName := clusterapi.MachineDeploymentName(cluster, workerNodeGroup)
	managementCluster.WaitForMachineDeploymentReady(machineDeploymentName)
}
func waitForClusterMachineDeploymentsReady(managementCluster *framework.ClusterE2ETest, cluster *anywherev1.Cluster) {
for _, w := range cluster.Spec.WorkerNodeGroupConfigurations {
waitForMachineDeploymentReady(managementCluster, cluster, w)
}
}
func waitForWorkloadClustersMachineDeploymentsReady(managementCluster *framework.ClusterE2ETest, workloadClusters framework.WorkloadClusters) {
for _, w := range workloadClusters {
waitForClusterMachineDeploymentsReady(managementCluster, w.ClusterConfig.Cluster)
}
}
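// machineSideEffectChecker polls the machines of a set of workload clusters for
// checkDuration, waiting waitInBetweenTries between polls, to detect unintended rollouts.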
type machineSideEffectChecker struct {
tb testing.TB
checkDuration, waitInBetweenTries time.Duration
}
func (m machineSideEffectChecker) haveMachinesChanged(managementCluster *framework.ClusterE2ETest, preUpgradeWorkloadClustersState []clusterWithMachines) (changed bool, changeReason string) {
m.tb.Logf("Checking for changes in machines for %s", m.checkDuration)
start := time.Now()
retry:
for now := time.Now(); now.Sub(start) <= m.checkDuration; now = time.Now() {
for _, workloadCluster := range preUpgradeWorkloadClustersState {
m.tb.Logf("Reading CAPI machines for cluster %s", workloadCluster.name)
postUpgradeMachines, err := managementCluster.CapiMachinesForCluster(workloadCluster.name)
if err != nil {
				m.tb.Logf("Failed getting machines for cluster %s, ignoring the error in case it's transient: %s", workloadCluster.name, err)
continue
}
if changed, changeReason = anyMachinesChanged(workloadCluster.machines, postUpgradeMachines); changed {
				changeReason = fmt.Sprintf("cluster %s has changed: %s", workloadCluster.name, changeReason)
break retry
}
m.tb.Logf("Machines for workload cluster %s haven't changed after upgrading the management cluster", workloadCluster.name)
}
m.tb.Logf("Waiting for %s until next check", m.waitInBetweenTries)
time.Sleep(m.waitInBetweenTries)
}
return changed, changeReason
}
// eksaLocalPackagedBinary implements eksaPackagedBinary using the local eks-a binary
// being tested by this suite.
type eksaLocalPackagedBinary struct {
path, version string
}
func (b eksaLocalPackagedBinary) BinaryPath() (string, error) {
return b.path, nil
}
func (b eksaLocalPackagedBinary) Version() string {
return b.version
}
func newEKSAPackagedBinaryForLocalBinary(tb testing.TB) eksaPackagedBinary {
tb.Helper()
version, err := framework.EKSAVersionForTestBinary()
if err != nil {
tb.Fatal(err)
}
path, err := framework.DefaultLocalEKSABinaryPath()
if err != nil {
tb.Fatal(err)
}
return eksaLocalPackagedBinary{
path: path,
version: version,
}
}
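// runTestManagementClusterUpgradeSideEffects creates a management cluster and a workload
// cluster with the latest minor release, then upgrades the management cluster with the
// local eks-a binary under test and verifies the workload cluster machines are left
// untouched.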
func runTestManagementClusterUpgradeSideEffects(t *testing.T, provider framework.Provider, osFamily anywherev1.OSFamily, kubeVersion anywherev1.KubernetesVersion) {
latestRelease := latestMinorRelease(t)
managementCluster := framework.NewClusterE2ETest(t, provider, framework.PersistentCluster())
managementCluster.GenerateClusterConfigForVersion(latestRelease.Version, framework.ExecuteWithEksaRelease(latestRelease))
managementCluster.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithEtcdCountIfExternal(1),
),
provider.WithKubeVersionAndOS(osFamily, kubeVersion),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
workloadCluster := framework.NewClusterE2ETest(t, provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
)
workloadCluster.GenerateClusterConfigForVersion(latestRelease.Version, framework.ExecuteWithEksaRelease(latestRelease))
workloadCluster.UpdateClusterConfig(api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(2),
api.WithControlPlaneLabel("cluster.x-k8s.io/failure-domain", "ds.meta_data.failuredomain"),
api.RemoveAllWorkerNodeGroups(),
api.WithWorkerNodeGroup("workers-0",
api.WithCount(3),
api.WithLabel("cluster.x-k8s.io/failure-domain", "ds.meta_data.failuredomain"),
),
api.WithEtcdCountIfExternal(3),
api.WithCiliumPolicyEnforcementMode(anywherev1.CiliumPolicyModeAlways)),
provider.WithNewWorkerNodeGroup("workers-0",
framework.WithWorkerNodeGroup("workers-0",
api.WithCount(2),
api.WithLabel("cluster.x-k8s.io/failure-domain", "ds.meta_data.failuredomain"))),
framework.WithOIDCClusterConfig(t),
provider.WithKubeVersionAndOS(osFamily, kubeVersion),
)
test.WithWorkloadClusters(workloadCluster)
runFlowUpgradeManagementClusterCheckForSideEffects(test,
framework.NewEKSAReleasePackagedBinary(latestRelease),
newEKSAPackagedBinaryForLocalBinary(t),
)
}
| 253 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
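// runSimpleFlow generates a cluster config, creates the cluster, and deletes it.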
func runSimpleFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.DeleteCluster()
}
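// runTinkerbellSimpleFlow runs the simple create/delete flow on bare metal and validates
// that the hardware is decommissioned afterwards.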
func runTinkerbellSimpleFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
| 24 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
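// runTinkerbellSingleNodeFlow creates a single-node bare metal cluster, validates that
// the control plane node has no taints and carries the expected labels, and tears
// everything down.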
func runTinkerbellSingleNodeFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneNoTaints, framework.ValidateControlPlaneLabels)
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
| 20 |
eks-anywhere | aws | Go | //go:build e2e && (snow || all_providers)
// +build e2e
// +build snow all_providers
package e2e
import (
"testing"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
// AWS IAM Auth
func TestSnowKubernetes127UbuntuAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu127()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runAWSIamAuthFlow(test)
}
func TestSnowKubernetes126To127AWSIamAuthUpgrade(t *testing.T) {
provider := framework.NewSnow(t, framework.WithSnowUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runUpgradeFlowWithAWSIamAuth(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
}
// Labels
func TestSnowKubernetes127UbuntuLabelsUpgradeFlow(t *testing.T) {
provider := framework.NewSnow(t,
framework.WithSnowWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(worker0, api.WithCount(1), api.WithLabel(key1, val1)),
),
framework.WithSnowWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1), api.WithLabel(key2, val2)),
),
framework.WithSnowUbuntu127(),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(),
),
)
runLabelsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val2)),
api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val1)),
api.WithControlPlaneLabel(cpKey1, cpVal1),
),
)
}
// OIDC
func TestSnowKubernetes127OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu127()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
// Proxy config
func TestSnowKubernetes127UbuntuProxyConfig(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
// TODO: provide separate Proxy Env Vars for Snow provider. Leaving VSphere for backwards compatibility
framework.WithProxy(framework.VsphereProxyRequiredEnvVars),
)
runProxyConfigFlow(test)
}
// Simpleflow
func TestSnowKubernetes123SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestSnowKubernetes124SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestSnowKubernetes125SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleFlow(test)
}
func TestSnowKubernetes126SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleFlow(test)
}
func TestSnowKubernetes127SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewSnow(t, framework.WithSnowUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
// Taints
func TestSnowKubernetes127UbuntuTaintsUpgradeFlow(t *testing.T) {
provider := framework.NewSnow(t,
framework.WithSnowWorkerNodeGroup(
worker0,
framework.NoScheduleWorkerNodeGroup(worker0, 1),
),
framework.WithSnowWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithSnowWorkerNodeGroup(
worker2,
framework.PreferNoScheduleWorkerNodeGroup(worker2, 1),
),
framework.WithSnowUbuntu127(),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(),
),
)
runTaintsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker2, api.WithNoTaints()),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
// Upgrade
func TestSnowKubernetes126To127UbuntuMultipleFieldsUpgrade(t *testing.T) {
provider := framework.NewSnow(t, framework.WithSnowUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube126),
api.WithControlPlaneCount(3),
api.WithWorkerNodeCount(1),
),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(2),
),
provider.WithProviderUpgrade(
api.WithSnowInstanceTypeForAllMachines("sbe-c.xlarge"),
api.WithSnowPhysicalNetworkConnectorForAllMachines(v1alpha1.QSFP),
),
)
}
func TestSnowKubernetes127UbuntuRemoveWorkerNodeGroups(t *testing.T) {
provider := framework.NewSnow(t,
framework.WithSnowWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(worker0, api.WithCount(1)),
),
framework.WithSnowWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithSnowUbuntu127(),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(),
),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.RemoveWorkerNodeGroup(worker1),
api.WithWorkerNodeGroup(worker0, api.WithCount(1)),
),
provider.WithNewSnowWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(
worker2,
api.WithCount(1),
),
),
)
}
func TestSnowKubernetes123UbuntuTo124Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithUbuntu123(), snow.WithUbuntu124())
}
func TestSnowKubernetes124UbuntuTo125Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithUbuntu124(), snow.WithUbuntu125())
}
func TestSnowKubernetes125UbuntuTo126Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithUbuntu125(), snow.WithUbuntu126())
}
func TestSnowKubernetes126UbuntuTo127Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithUbuntu126(), snow.WithUbuntu127())
}
func TestSnowKubernetes123BottlerocketTo124Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocket123(), snow.WithBottlerocket124())
}
func TestSnowKubernetes124BottlerocketTo125Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocket124(), snow.WithBottlerocket125())
}
func TestSnowKubernetes125BottlerocketTo126Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocket125(), snow.WithBottlerocket126())
}
func TestSnowKubernetes126BottlerocketTo127Upgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocket126(), snow.WithBottlerocket127())
}
func TestSnowKubernetes123To124BottlerocketStaticIPUpgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocketStaticIP123(), snow.WithBottlerocketStaticIP124())
}
func TestSnowKubernetes124To125BottlerocketStaticIPUpgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocketStaticIP124(), snow.WithBottlerocketStaticIP125())
}
func TestSnowKubernetes125To126BottlerocketStaticIPUpgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocketStaticIP125(), snow.WithBottlerocketStaticIP126())
}
func TestSnowKubernetes126To127BottlerocketStaticIPUpgrade(t *testing.T) {
snow := framework.NewSnow(t)
test := framework.NewClusterE2ETest(t, snow)
runSnowUpgradeTest(test, snow, snow.WithBottlerocketStaticIP126(), snow.WithBottlerocketStaticIP127())
}
// Workload API
func TestSnowMulticlusterWorkloadClusterAPI(t *testing.T) {
snow := framework.NewSnow(t)
managementCluster := framework.NewClusterE2ETest(
t, snow,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
snow.WithBottlerocket124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, snow, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
snow.WithBottlerocket123(),
),
framework.NewClusterE2ETest(
t, snow, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
snow.WithBottlerocket124(),
),
framework.NewClusterE2ETest(
t, snow, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
snow.WithBottlerocket125(),
),
framework.NewClusterE2ETest(
t, snow, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
snow.WithBottlerocket126(),
),
)
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.DeleteClusterWithKubectl()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
| 409 |
eks-anywhere | aws | Go | //go:build e2e
package e2e
import (
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/test/framework"
)
// runSnowUpgradeTest creates a Snow cluster using a base configuration (multiple workers, labels, taints, etc.) plus the provided changes,
// upgrades it with the given changes, and deletes it after validating its state. This is meant to be used for Snow full upgrade flow tests.
// It exercises as many changes as possible: control plane scaling, worker node group scaling, worker node group addition and
// removal, taints, labels, etc. It allows for extra customization through baseAPIChanges and upgradeAPIChanges, but the only required
// changes for those arguments are the OS family and Kubernetes version, since they are not set by default. These can all be provided
// using the provider methods `WithUbuntu124`, `WithBottlerocket124`, etc.
func runSnowUpgradeTest(test *framework.ClusterE2ETest, snow *framework.Snow, baseAPIChanges, upgradeAPIChanges api.ClusterConfigFiller) {
test.WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(3),
api.WithStackedEtcdTopology(),
),
snow.WithWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(
worker0,
api.WithCount(1),
api.WithLabel(key1, val2),
),
api.WithDHCP(),
),
snow.WithWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(
worker1,
api.WithCount(1),
api.WithTaint(framework.NoScheduleTaint())),
api.WithDHCP(),
),
baseAPIChanges,
)
runUpgradeFlow(test,
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.RemoveWorkerNodeGroup(worker1),
api.WithWorkerNodeGroup(worker0, api.WithCount(2)),
),
snow.WithWorkerNodeGroup(
worker2,
framework.WithWorkerNodeGroup(worker2, api.WithCount(1)),
api.WithDHCP(),
),
upgradeAPIChanges,
)
}
// runUpgradeFlow creates a cluster, upgrades it with the given changes using the CLI, validates it, and finally deletes it if
// all previous steps are successful. This represents a basic user workflow and is meant for standalone clusters' CLI tests.
func runUpgradeFlow(test *framework.ClusterE2ETest, upgradeChanges ...api.ClusterConfigFiller) {
test.CreateCluster()
validateCluster(test)
test.StopIfFailed()
test.UpdateClusterConfig(upgradeChanges...)
test.UpgradeCluster()
validateCluster(test)
test.StopIfFailed()
test.DeleteCluster()
}
// validateCluster performs a set of validations comparing the cluster config definition with
// the current state of the cluster. This is meant to be used after a create or upgrade operation
// to make sure the cluster has reached the desired state. This should eventually be replaced by
// the cluster validator.
func validateCluster(test *framework.ClusterE2ETest) {
test.ValidateCluster(test.ClusterConfig.Cluster.Spec.KubernetesVersion)
test.ValidateWorkerNodes(framework.ValidateWorkerNodeTaints, framework.ValidateWorkerNodeLabels)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneTaints, framework.ValidateControlPlaneLabels)
}
| 79 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/test/framework"
)
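// runStackedEtcdFlow creates and deletes a cluster whose configuration, supplied by the
// caller, uses a stacked etcd topology.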
func runStackedEtcdFlow(test *framework.ClusterE2ETest) {
test.GenerateClusterConfig()
test.CreateCluster()
test.DeleteCluster()
}
| 15 |
eks-anywhere | aws | Go | //go:build e2e
// +build e2e
package e2e
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
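// runTaintsUpgradeFlow creates a cluster, validates worker and control plane taints,
// applies the given upgrade options, and re-validates taints on the upgraded cluster
// before deletion.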
func runTaintsUpgradeFlow(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.ValidateWorkerNodes(framework.ValidateWorkerNodeTaints)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneTaints)
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.ValidateWorkerNodes(framework.ValidateWorkerNodeTaints)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneTaints)
test.StopIfFailed()
test.DeleteCluster()
}
| 23 |
eks-anywhere | aws | Go | //go:build e2e && (tinkerbell || all_providers)
// +build e2e
// +build tinkerbell all_providers
package e2e
import (
"testing"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/test/framework"
)
// AWS IAM Auth
func TestTinkerbellKubernetes123AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellAWSIamAuthFlow(test)
}
func TestTinkerbellKubernetes127AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellAWSIamAuthFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellAWSIamAuthFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellAWSIamAuthFlow(test)
}
// Upgrade
func TestTinkerbellKubernetes127UbuntuWorkerNodeUpgrade(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(2)),
)
}
func TestTinkerbellKubernetes125UbuntuWorkerNodeScaleUpWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(2),
)
runUpgradeFlowForBareMetalWithAPI(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeCount(2),
),
)
}
func TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithCustomLabelHardware(1, "worker-0"),
)
runUpgradeFlowForBareMetalWithAPI(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeGroup("worker-0",
api.WithCount(1),
api.WithMachineGroupRef("worker-0", "TinkerbellMachineConfig"),
),
),
api.TinkerbellToConfigFiller(
api.WithCustomTinkerbellMachineConfig("worker-0",
framework.UpdateTinkerbellMachineSSHAuthorizedKey(),
api.WithOsFamilyForTinkerbellMachineConfig(v1alpha1.Ubuntu),
),
),
)
}
// Curated packages
func TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes127BottleRocketSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes127UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes126BottleRocketSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes126UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes126BottleRocketSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes126UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes126BottleRocketSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes126UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126BottleRocketCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube126),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes125BottleRocketSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes125UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes125BottleRocketSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes125UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes125BottleRocketSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes125UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125BottleRocketCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube125),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes124BottleRocketSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes124UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes124BottleRocketSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes124UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes124BottleRocketSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes124UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124BottleRocketCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube124),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes123UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketSingleNodeCuratedPackagesFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes123UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes123UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes123BottleRocketSingleNodeCuratedPackagesHarborFlow(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestTinkerbellKubernetes123UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketCuratedPackagesAdotSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes123UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube123),
framework.WithControlPlaneHardware(1),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test)
}
// Single node
func TestTinkerbellKubernetes127BottleRocketSingleNodeSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
framework.WithControlPlaneHardware(1),
)
runTinkerbellSingleNodeFlow(test)
}
func TestTinkerbellKubernetes127UbuntuSingleNodeSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
framework.WithControlPlaneHardware(1),
)
runTinkerbellSingleNodeFlow(test)
}
// Multicluster
func TestTinkerbellKubernetes127UbuntuWorkloadCluster(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
),
)
runTinkerbellWorkloadClusterFlow(test)
}
func TestTinkerbellKubernetes127UbuntuWorkloadClusterWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithManagementCluster(managementCluster.ClusterName),
),
),
)
runWorkloadClusterWithAPIFlowForBareMetal(test)
}
func TestTinkerbellKubernetes127UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
).WithClusterConfig(
framework.WithFluxGithubConfig(),
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithManagementCluster(managementCluster.ClusterName),
),
),
)
runWorkloadClusterGitOpsAPIFlowForBareMetal(test)
}
func TestTinkerbellKubernetes127BottlerocketWorkloadClusterSimpleFlow(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
),
)
runTinkerbellWorkloadClusterFlow(test)
}
func TestTinkerbellKubernetes127BottlerocketWorkloadClusterWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithManagementCluster(managementCluster.ClusterName),
),
),
)
runWorkloadClusterWithAPIFlowForBareMetal(test)
}
func TestTinkerbellKubernetes127UbuntuSingleNodeWorkloadCluster(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(0),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
),
)
runTinkerbellWorkloadClusterFlow(test)
}
func TestTinkerbellKubernetes127UbuntuSingleNodeWorkloadClusterWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(0),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
),
)
runWorkloadClusterWithAPIFlowForBareMetal(test)
}
func TestTinkerbellKubernetes127BottlerocketSingleNodeWorkloadCluster(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(0),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
),
)
runTinkerbellWorkloadClusterFlow(test)
}
func TestTinkerbellKubernetes127BottlerocketSingleNodeWorkloadClusterWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(0),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
),
)
runWorkloadClusterWithAPIFlowForBareMetal(test)
}
func TestTinkerbellKubernetes127BottlerocketWorkloadClusterSkipPowerActions(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithNoPowerActions(),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithNoPowerActions(),
),
)
runTinkerbellWorkloadClusterFlowSkipPowerActions(test)
}
func TestTinkerbellUpgrade127MulticlusterWorkloadClusterWorkerScaleup(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(3),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
),
)
runSimpleWorkloadUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithWorkerNodeCount(2),
),
)
}
func TestTinkerbellSingleNode125ManagementScaleupWorkloadWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube125),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube125),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithEtcdCountIfExternal(0),
),
),
)
runWorkloadClusterUpgradeFlowWithAPIForBareMetal(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeCount(2),
),
)
}
func TestTinkerbellKubernetes123UbuntuTo124Upgrade(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(framework.UpdateTinkerbellUbuntuTemplate124Var()),
)
}
func TestTinkerbellKubernetes124UbuntuTo125Upgrade(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube125,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)),
provider.WithProviderUpgrade(framework.UpdateTinkerbellUbuntuTemplate125Var()),
)
}
func TestTinkerbellKubernetes125UbuntuTo126Upgrade(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube126,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
provider.WithProviderUpgrade(framework.UpdateTinkerbellUbuntuTemplate126Var()),
)
}
func TestTinkerbellKubernetes126UbuntuTo127Upgrade(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(framework.UpdateTinkerbellUbuntuTemplate127Var()),
)
}
func TestTinkerbellUpgrade127MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(2),
framework.WithFluxGithubEnvVarCheck(),
framework.WithFluxGithubCleanup(),
).WithClusterConfig(
framework.WithFluxGithubConfig(),
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.RemoveAllWorkerNodeGroups(),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithManagementCluster(managementCluster.ClusterName),
),
),
)
runWorkloadClusterGitOpsAPIUpgradeFlowForBareMetal(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeCount(2),
),
)
}
func TestTinkerbellUpgrade127MulticlusterWorkloadClusterCPScaleup(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(4),
framework.WithWorkerHardware(1),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
),
)
runSimpleWorkloadUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(3),
),
)
}
func TestTinkerbellUpgrade127MulticlusterWorkloadClusterWorkerScaleDown(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(2),
framework.WithWorkerHardware(3),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
),
)
runSimpleWorkloadUpgradeFlowForBareMetal(
test,
		v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithWorkerNodeCount(1),
),
)
}
func TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(3),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
),
)
runSimpleWorkloadUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(framework.UpdateTinkerbellUbuntuTemplate127Var()),
)
}
func TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade124To125WithAPI(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell())
managementCluster := framework.NewClusterE2ETest(
t,
provider,
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(1),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
),
)
test := framework.NewMulticlusterE2ETest(
t,
managementCluster,
)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithManagementCluster(managementCluster.ClusterName),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
),
),
)
runWorkloadClusterUpgradeFlowWithAPIForBareMetal(test,
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube125),
),
)
}
// Nodes powered on
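// Verifies that cluster creation succeeds when the hardware is already powered on
// instead of starting from a powered-off state.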
func TestTinkerbellKubernetes127WithNodesPoweredOn(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.PowerOnHardware()
test.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
// OIDC
func TestTinkerbellKubernetes127OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellOIDCFlow(test)
}
// Registry mirror
func TestTinkerbellKubernetes127UbuntuRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithRegistryMirrorEndpointAndCert(constants.TinkerbellProviderName),
)
runTinkerbellRegistryMirrorFlow(test)
}
func TestTinkerbellKubernetes127UbuntuInsecureSkipVerifyRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithRegistryMirrorInsecureSkipVerify(constants.TinkerbellProviderName),
)
runTinkerbellRegistryMirrorFlow(test)
}
func TestTinkerbellKubernetes127BottlerocketRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithRegistryMirrorEndpointAndCert(constants.TinkerbellProviderName),
)
runTinkerbellRegistryMirrorFlow(test)
}
func TestTinkerbellKubernetes127UbuntuAuthenticatedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithAuthenticatedRegistryMirror(constants.TinkerbellProviderName),
)
runTinkerbellRegistryMirrorFlow(test)
}
func TestTinkerbellKubernetes127BottlerocketAuthenticatedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithAuthenticatedRegistryMirror(constants.TinkerbellProviderName),
)
runTinkerbellRegistryMirrorFlow(test)
}
// Simple flow
func TestTinkerbellKubernetes123UbuntuSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu123Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124UbuntuSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu124Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125UbuntuSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126UbuntuSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127UbuntuSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes123RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithRedHat123Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithRedHat124Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithRedHat125Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithRedHat126Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithRedHat127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes123BottleRocketSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes124BottleRocketSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes125BottleRocketSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes126BottleRocketSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127UbuntuThreeControlPlaneReplicasSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketThreeControlPlaneReplicasSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(1),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127UbuntuThreeWorkersSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(3),
)
runTinkerbellSimpleFlow(test)
}
func TestTinkerbellKubernetes127BottleRocketThreeWorkersSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithBottleRocketTinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(3),
)
runTinkerbellSimpleFlow(test)
}
// Skip power actions
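// These tests disable BMC power actions in the cluster config and drive machine power
// and PXE boot directly from the test instead.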
func TestTinkerbellKubernetes127SkipPowerActions(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithNoPowerActions(),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
)
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.PXEBootHardware()
test.PowerOnHardware()
test.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
func TestTinkerbellKubernetes127SingleNodeSkipPowerActions(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()),
framework.WithClusterSingleNode(v1alpha1.Kube127),
framework.WithNoPowerActions(),
framework.WithControlPlaneHardware(1),
)
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.PXEBootHardware()
test.PowerOnHardware()
test.CreateCluster(framework.WithForce(), framework.WithControlPlaneWaitTimeout("20m"))
test.DeleteCluster()
test.PowerOffHardware()
test.ValidateHardwareDecommissioned()
}
func TestTinkerbellKubernetes127UbuntuControlPlaneScaleUp(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(1),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithControlPlaneCount(3)),
)
}
func TestTinkerbellKubernetes127UbuntuWorkerNodeScaleUp(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(2)),
)
}
func TestTinkerbellKubernetes127UbuntuWorkerNodeScaleDown(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(2)),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(2),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)),
)
}
func TestTinkerbellKubernetes127UbuntuControlPlaneScaleDown(t *testing.T) {
provider := framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithControlPlaneHardware(3),
framework.WithWorkerHardware(1),
)
runSimpleUpgradeFlowForBareMetal(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithControlPlaneCount(1)),
)
}
// Worker nodegroup taints and labels
func TestTinkerbellKubernetes127UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(
t,
framework.WithUbuntu127Tinkerbell(),
framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel1),
framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel2),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneLabel(cpKey1, cpVal1),
api.WithControlPlaneTaints([]corev1.Taint{framework.NoScheduleTaint()}),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup(worker0, api.WithMachineGroupRef(nodeGroupLabel1, "TinkerbellMachineConfig"), api.WithTaint(framework.PreferNoScheduleTaint()), api.WithLabel(key1, val1), api.WithCount(1)),
api.WithWorkerNodeGroup(worker1, api.WithMachineGroupRef(nodeGroupLabel2, "TinkerbellMachineConfig"), api.WithLabel(key2, val2), api.WithCount(1)),
),
framework.WithControlPlaneHardware(1),
framework.WithCustomLabelHardware(1, nodeGroupLabel1),
framework.WithCustomLabelHardware(1, nodeGroupLabel2),
)
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateWorkerNodes(framework.ValidateWorkerNodeTaints, framework.ValidateWorkerNodeLabels)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneTaints, framework.ValidateControlPlaneLabels)
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
func TestTinkerbellKubernetes127BottlerocketWorkerNodeGroupsTaintsAndLabels(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewTinkerbell(
t,
framework.WithBottleRocketTinkerbell(),
framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel1),
framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel2),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneLabel(cpKey1, cpVal1),
api.WithControlPlaneTaints([]corev1.Taint{framework.NoScheduleTaint()}),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
api.WithWorkerNodeGroup(worker0, api.WithMachineGroupRef(nodeGroupLabel1, "TinkerbellMachineConfig"), api.WithTaint(framework.PreferNoScheduleTaint()), api.WithLabel(key1, val1), api.WithCount(1)),
api.WithWorkerNodeGroup(worker1, api.WithMachineGroupRef(nodeGroupLabel2, "TinkerbellMachineConfig"), api.WithLabel(key2, val2), api.WithCount(1)),
),
framework.WithControlPlaneHardware(1),
framework.WithCustomLabelHardware(1, nodeGroupLabel1),
framework.WithCustomLabelHardware(1, nodeGroupLabel2),
)
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.ValidateWorkerNodes(framework.ValidateWorkerNodeTaints, framework.ValidateWorkerNodeLabels)
test.ValidateControlPlaneNodes(framework.ValidateControlPlaneTaints, framework.ValidateControlPlaneLabels)
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
// Airgapped tests
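// These tests serve the hook and OS images from a web server on the admin machine's
// local IP, so node provisioning does not depend on public endpoints.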
func TestTinkerbellAirgappedKubernetes127BottleRocketRegistryMirror(t *testing.T) {
	localIP, err := networkutils.GetLocalIP()
	if err != nil {
		t.Fatalf("Cannot get admin machine local IP: %v", err)
	}
	t.Logf("Admin machine's IP is: %s", localIP)
	test := framework.NewClusterE2ETest(
		t,
		framework.NewTinkerbell(t,
			framework.WithBottleRocketTinkerbell(),
			framework.WithHookImagesURLPath("http://"+localIP.String()+":8080"),
			framework.WithOSImageURL("http://"+localIP.String()+":8080/"+bottlerocketOSFileName),
		),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithRegistryMirrorEndpointAndCert(constants.TinkerbellProviderName),
)
runTinkerbellAirgapConfigFlow(test, "10.80.0.0/16")
}
// Proxy tests
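// These tests run the airgapped flow with an HTTP(S) proxy configured from the required
// Tinkerbell proxy environment variables.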
func TestTinkerbellAirgappedKubernetes127BottlerocketProxyConfigFlow(t *testing.T) {
	localIP, err := networkutils.GetLocalIP()
	if err != nil {
		t.Fatalf("Cannot get admin machine local IP: %v", err)
	}
	t.Logf("Admin machine's IP is: %s", localIP)
	test := framework.NewClusterE2ETest(
		t,
		framework.NewTinkerbell(t,
			framework.WithBottleRocketTinkerbell(),
			framework.WithHookImagesURLPath("http://"+localIP.String()+":8080"),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithProxy(framework.TinkerbellProxyRequiredEnvVars),
)
runTinkerbellAirgapConfigProxyFlow(test, "10.80.0.0/16")
}
func TestTinkerbellAirgappedKubernetes127UbuntuProxyConfigFlow(t *testing.T) {
	localIP, err := networkutils.GetLocalIP()
	if err != nil {
		t.Fatalf("Cannot get admin machine local IP: %v", err)
	}
	t.Logf("Admin machine's IP is: %s", localIP)
	test := framework.NewClusterE2ETest(
		t,
		framework.NewTinkerbell(t,
			framework.WithUbuntu127Tinkerbell(),
			framework.WithHookImagesURLPath("http://"+localIP.String()+":8080"),
),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
),
framework.WithControlPlaneHardware(1),
framework.WithWorkerHardware(1),
framework.WithProxy(framework.TinkerbellProxyRequiredEnvVars),
)
runTinkerbellAirgapConfigProxyFlow(test, "10.80.0.0/16")
}
//go:build e2e
// +build e2e

package e2e

import (
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/test/framework"
)
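// runSimpleUpgradeFlow creates a cluster from a generated config, upgrades it with the
// provided options, and validates that the cluster runs the expected Kubernetes version
// before tearing it down.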
func runSimpleUpgradeFlow(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.StopIfFailed()
test.DeleteCluster()
}
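// runUpgradeFlowWithCheckpoint runs the upgrade twice: the first attempt uses commandOpts
// (which tests use to interrupt the command partway through), and the second applies a new
// set of options to verify the upgrade resumes from the saved checkpoint.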
func runUpgradeFlowWithCheckpoint(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts []framework.ClusterE2ETestOpt, clusterOpts2 []framework.ClusterE2ETestOpt, commandOpts []framework.CommandOpt) {
test.GenerateClusterConfig()
test.CreateCluster()
test.UpgradeClusterWithNewConfig(clusterOpts, commandOpts...)
test.UpgradeClusterWithNewConfig(clusterOpts2)
test.ValidateCluster(updateVersion)
test.StopIfFailed()
test.DeleteCluster()
}
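// runSimpleUpgradeFlowForBareMetal is the bare-metal variant of runSimpleUpgradeFlow: it
// also generates a hardware config, powers off the machines before creation, and validates
// that the hardware is decommissioned after the cluster is deleted.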
func runSimpleUpgradeFlowForBareMetal(test *framework.ClusterE2ETest, updateVersion v1alpha1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(updateVersion)
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
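// runUpgradeFlowWithAPI upgrades a cluster declaratively: it loads the config generated by
// the CLI, applies the given fillers, and re-applies the cluster manifest instead of
// invoking the upgrade command.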
func runUpgradeFlowWithAPI(test *framework.ClusterE2ETest, fillers ...api.ClusterConfigFiller) {
test.CreateCluster()
test.LoadClusterConfigGeneratedByCLI()
test.UpdateClusterConfig(fillers...)
test.ApplyClusterManifest()
test.ValidateClusterState()
test.StopIfFailed()
test.DeleteCluster()
}
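// runUpgradeFlowForBareMetalWithAPI is the bare-metal variant of runUpgradeFlowWithAPI,
// adding hardware generation, power-off before creation, and decommission validation.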
func runUpgradeFlowForBareMetalWithAPI(test *framework.ClusterE2ETest, fillers ...api.ClusterConfigFiller) {
test.GenerateClusterConfig()
test.GenerateHardwareConfig()
test.PowerOffHardware()
test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
test.LoadClusterConfigGeneratedByCLI()
test.UpdateClusterConfig(fillers...)
test.ApplyClusterManifest()
test.ValidateClusterState()
test.StopIfFailed()
test.DeleteCluster()
test.ValidateHardwareDecommissioned()
}
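// runWorkloadClusterUpgradeFlowAPI creates a management cluster, then concurrently
// creates, upgrades (by re-applying manifests), and deletes each workload cluster with
// kubectl.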
func runWorkloadClusterUpgradeFlowAPI(test *framework.MulticlusterE2ETest, filler ...api.ClusterConfigFiller) {
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.UpdateClusterConfig(filler...)
wc.ApplyClusterManifest()
wc.ValidateClusterState()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
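// runWorkloadClusterUpgradeFlowAPIWithFlux drives the same workload cluster lifecycle
// through GitOps: clusters are created, upgraded, and deleted by pushing changes to Git.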
func runWorkloadClusterUpgradeFlowAPIWithFlux(test *framework.MulticlusterE2ETest, filler ...api.ClusterConfigFiller) {
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
test.PushWorkloadClusterToGit(wc, filler...)
wc.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
//go:build e2e
// +build e2e

package e2e

import (
	"testing"

	"github.com/aws/eks-anywhere/internal/pkg/api"
	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/semver"
	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
	"github.com/aws/eks-anywhere/test/framework"
)
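// latestMinorRelease returns the latest minor EKS Anywhere release for the test branch,
// failing the test if it cannot be fetched.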
func latestMinorRelease(t testing.TB) *releasev1.EksARelease {
t.Helper()
latestRelease, err := framework.GetLatestMinorReleaseFromTestBranch()
if err != nil {
t.Fatal(err)
}
return latestRelease
}
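// prevLatestMinorRelease returns the minor release immediately preceding the latest one,
// which upgrade tests use as the starting version.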
func prevLatestMinorRelease(t testing.TB) *releasev1.EksARelease {
t.Helper()
currLatestRelease := latestMinorRelease(t)
semCurrLatestRel, err := semver.New(currLatestRelease.Version)
if err != nil {
t.Fatal(err)
}
	// Fetch the previous latest minor release for workload creation. For example, if the
	// current latest release is v0.15.x, the previous latest minor release is v0.14.x.
prevLatestRel, err := framework.GetPreviousMinorReleaseFromVersion(semCurrLatestRel)
if err != nil {
t.Fatal(err)
}
return prevLatestRel
}
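// runUpgradeFromReleaseFlow creates a cluster with a previously released CLI version, then
// upgrades it with the CLI under test and validates the desired Kubernetes version.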
func runUpgradeFromReleaseFlow(test *framework.ClusterE2ETest, latestRelease *releasev1.EksARelease, wantVersion anywherev1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfigForVersion(latestRelease.Version, framework.ExecuteWithEksaRelease(latestRelease))
test.CreateCluster(framework.ExecuteWithEksaRelease(latestRelease))
	// Wait manually because old versions of the CLI don't wait long enough after
	// creation, which makes the upgrade preflight validations fail.
test.WaitForControlPlaneReady()
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(wantVersion)
test.StopIfFailed()
test.DeleteCluster()
}
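// runUpgradeWithFluxFromReleaseFlow mirrors runUpgradeFromReleaseFlow and additionally
// validates that Flux is healthy after the upgrade.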
func runUpgradeWithFluxFromReleaseFlow(test *framework.ClusterE2ETest, latestRelease *releasev1.EksARelease, wantVersion anywherev1.KubernetesVersion, clusterOpts ...framework.ClusterE2ETestOpt) {
test.GenerateClusterConfigForVersion(latestRelease.Version, framework.ExecuteWithEksaRelease(latestRelease))
test.CreateCluster(framework.ExecuteWithEksaRelease(latestRelease))
	// Wait manually because old versions of the CLI don't wait long enough after
	// creation, which makes the upgrade preflight validations fail.
test.WaitForControlPlaneReady()
test.UpgradeClusterWithNewConfig(clusterOpts)
test.ValidateCluster(wantVersion)
test.ValidateFlux()
test.StopIfFailed()
test.DeleteCluster()
}
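// runMulticlusterUpgradeFromReleaseFlowAPI creates management and workload clusters with a
// released version, upgrades the management cluster with the CLI under test, re-points the
// workload clusters at the new bundle, and finally re-creates a workload cluster pinned to
// the old bundle.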
func runMulticlusterUpgradeFromReleaseFlowAPI(test *framework.MulticlusterE2ETest, release *releasev1.EksARelease, upgradeChanges api.ClusterConfigFiller) {
test.CreateManagementCluster(framework.ExecuteWithEksaRelease(release))
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.CreateCluster(framework.ExecuteWithEksaRelease(release))
wc.ValidateCluster(wc.ClusterConfig.Cluster.Spec.KubernetesVersion)
wc.StopIfFailed()
})
oldCluster := test.ManagementCluster.GetEKSACluster()
test.ManagementCluster.UpdateClusterConfig(upgradeChanges)
test.ManagementCluster.UpgradeCluster()
test.ManagementCluster.ValidateCluster(test.ManagementCluster.ClusterConfig.Cluster.Spec.KubernetesVersion)
test.ManagementCluster.StopIfFailed()
cluster := test.ManagementCluster.GetEKSACluster()
	// Upgrade the workload clusters now because they still reference the old version of the bundle.
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.UpdateClusterConfig(
api.JoinClusterConfigFillers(upgradeChanges),
api.ClusterToConfigFiller(
api.WithBundlesRef(cluster.Spec.BundlesRef.Name, cluster.Spec.BundlesRef.Namespace, cluster.Spec.BundlesRef.APIVersion),
),
)
wc.ApplyClusterManifest()
wc.ValidateClusterState()
wc.StopIfFailed()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
wc.StopIfFailed()
})
	// Create a workload cluster pinned to the old bundle.
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithBundlesRef(oldCluster.Spec.BundlesRef.Name, oldCluster.Spec.BundlesRef.Namespace, oldCluster.Spec.BundlesRef.APIVersion),
),
)
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.StopIfFailed()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
wc.StopIfFailed()
})
test.DeleteManagementCluster()
}
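// runMulticlusterUpgradeFromReleaseFlowAPIWithFlux performs the same mixed-bundle upgrade
// flow as runMulticlusterUpgradeFromReleaseFlowAPI, driving workload cluster changes
// through Git instead of kubectl.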
func runMulticlusterUpgradeFromReleaseFlowAPIWithFlux(test *framework.MulticlusterE2ETest, release *releasev1.EksARelease, upgradeChanges api.ClusterConfigFiller) {
test.CreateManagementCluster(framework.ExecuteWithEksaRelease(release))
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.CreateCluster(framework.ExecuteWithEksaRelease(release))
wc.ValidateCluster(wc.ClusterConfig.Cluster.Spec.KubernetesVersion)
wc.StopIfFailed()
})
oldCluster := test.ManagementCluster.GetEKSACluster()
test.ManagementCluster.UpdateClusterConfig(upgradeChanges)
test.ManagementCluster.UpgradeCluster()
test.ManagementCluster.ValidateCluster(test.ManagementCluster.ClusterConfig.Cluster.Spec.KubernetesVersion)
test.ManagementCluster.StopIfFailed()
cluster := test.ManagementCluster.GetEKSACluster()
	// Upgrade the workload clusters now because they still reference the old version of the bundle.
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc,
api.JoinClusterConfigFillers(upgradeChanges),
api.ClusterToConfigFiller(
api.WithBundlesRef(cluster.Spec.BundlesRef.Name, cluster.Spec.BundlesRef.Namespace, cluster.Spec.BundlesRef.APIVersion),
),
)
wc.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
	// Create a workload cluster pinned to the old bundle.
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc,
api.ClusterToConfigFiller(
api.WithBundlesRef(oldCluster.Spec.BundlesRef.Name, oldCluster.Spec.BundlesRef.Namespace, oldCluster.Spec.BundlesRef.APIVersion),
),
)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.DeleteManagementCluster()
}
//go:build e2e && (vsphere || all_providers)
// +build e2e
// +build vsphere all_providers

package e2e

import (
	"os"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/aws/eks-anywhere/internal/pkg/api"
	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/features"
	"github.com/aws/eks-anywhere/test/framework"
)
// Autoimport
func TestVSphereKubernetes123BottlerocketAutoimport(t *testing.T) {
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithTemplateForAllMachines(""),
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runAutoImportFlow(test, provider)
}
func TestVSphereKubernetes124BottlerocketAutoimport(t *testing.T) {
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithTemplateForAllMachines(""),
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runAutoImportFlow(test, provider)
}
func TestVSphereKubernetes125BottlerocketAutoimport(t *testing.T) {
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithTemplateForAllMachines(""),
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runAutoImportFlow(test, provider)
}
func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) {
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithTemplateForAllMachines(""),
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runAutoImportFlow(test, provider)
}
func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) {
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithTemplateForAllMachines(""),
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runAutoImportFlow(test, provider)
}
// AWS IAM Auth
func TestVSphereKubernetes123AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes124AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes125AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes126AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes127AWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes123BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes124BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes125BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes126BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes127BottleRocketAWSIamAuth(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runAWSIamAuthFlow(test)
}
func TestVSphereKubernetes126To127AWSIamAuthUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithAWSIam(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runUpgradeFlowWithAWSIamAuth(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Ubuntu127Template()),
)
}
// Curated packages
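// Curated packages tests install the EKS Anywhere package controller from its
// Helm chart (the EksaPackageControllerHelm* values) and then exercise
// individual package flows. CheckCuratedPackagesCredentials ensures the
// required registry credentials are set before each test proceeds.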
func TestVSphereKubernetes123CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes124CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes125CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes126CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes127CuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageInstallSimpleFlow(test)
}
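// Emissary ingress controller package: same cluster setup as the simple
// flows above, with runCuratedPackageEmissaryInstallSimpleFlow installing
// and validating the emissary package.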
func TestVSphereKubernetes123CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes124CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes127CuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageEmissaryInstallSimpleFlow(test)
}
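// Harbor package: Harbor's components need persistent volumes, so these
// flows install it on top of a local storage provisioner
// (runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner).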
func TestVSphereKubernetes123CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes124CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes125CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes126CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes127CuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes123BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes124BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes125BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
		framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
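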
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes126BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
func TestVSphereKubernetes127BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test)
}
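// ADOT (AWS Distro for OpenTelemetry) package: the install/update flow
// installs the adot package and then applies a configuration update to it.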
func TestVSphereKubernetes123CuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes124CuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes125CuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes126CuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes127CuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes123BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes124BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes125BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes126BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
func TestVSphereKubernetes127BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesAdotInstallUpdateFlow(test)
}
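// Cluster Autoscaler package: the worker node group is created with
// autoscaling bounds, e.g.:
//
//	api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)
//
// and runAutoscalerWithMetricsServerSimpleFlow installs the
// cluster-autoscaler and metrics-server packages to exercise scaling.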
func TestVSphereKubernetes123UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes124UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes125UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes126UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes127UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) {
minNodes := 1
maxNodes := 2
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runAutoscalerWithMetricsServerSimpleFlow(test)
}
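// Prometheus package: runCuratedPackagesPrometheusInstallSimpleFlow installs
// the prometheus package and validates that it becomes ready.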
func TestVSphereKubernetes123UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes124UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes126UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes127UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube123),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube124),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube126),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube127),
EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
)
runCuratedPackagesPrometheusInstallSimpleFlow(test)
}
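// Workload cluster package flows: SetupSimpleMultiCluster stands up a
// management cluster plus a workload cluster, and the remote install flows
// verify that packages can be deployed to the workload cluster through the
// management cluster.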
func TestVSphereKubernetes123UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes124UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu125())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube126)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes127UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube127)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket125())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket126())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube126)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket127())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube127)
runCuratedPackageRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes123UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes124UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu125())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube126)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes127UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube127)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket125())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket126())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube126)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket127())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube127)
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test)
}
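// The cert-manager flow additionally requires credentials for certificate
// issuance, checked by CheckCertManagerCredentials.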
func TestVSphereKubernetes123UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes124UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu125())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube126)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes127UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube127)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes123BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket123())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube123)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes124BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket124())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube124)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket125())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes126BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket126())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube126)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) {
framework.CheckCuratedPackagesCredentials(t)
framework.CheckCertManagerCredentials(t)
provider := framework.NewVSphere(t, framework.WithBottleRocket127())
test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube127)
runCertManagerRemoteClusterInstallSimpleFlow(test)
}
// Download artifacts
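// Exercises the artifact download flow (runDownloadArtifactsFlow), which
// verifies that release artifacts can be downloaded for the cluster spec.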
func TestVSphereDownloadArtifacts(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runDownloadArtifactsFlow(test)
}
// Flux
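// GitOps tests: framework.WithFluxGithub() uses a GitHub-backed FluxConfig
// and framework.WithFluxGit() a generic git repository; runFluxFlow validates
// that the cluster config is committed and reconciled through Flux.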
func TestVSphereKubernetes127GithubFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithFluxGithub(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestVSphereKubernetes127GitFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestVSphereKubernetes127BottleRocketGithubFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithFluxGithub(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestVSphereKubernetes127BottleRocketGitFlux(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runFluxFlow(test)
}
func TestVSphereKubernetes126To127GitFluxUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(t,
provider,
framework.WithFluxGit(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Ubuntu127Template()),
)
}
func TestVSphereInstallGitFluxDuringUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := framework.NewClusterE2ETest(t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithFlux(
test,
v1alpha1.Kube127,
framework.WithFluxGit(),
framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
)
}
// Labels
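// Label upgrade tests start from a cluster with no worker node groups and
// upgrade into groups carrying node labels plus a control plane label, e.g.:
//
//	api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1))
//
// runLabelsUpgradeFlow then verifies the labels are applied to the nodes.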
func TestVSphereKubernetes127UbuntuLabelsUpgradeFlow(t *testing.T) {
provider := ubuntu127ProviderWithLabels(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runLabelsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)),
api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)),
api.WithWorkerNodeGroup(worker2),
api.WithControlPlaneLabel(cpKey1, cpVal1),
),
)
}
func TestVSphereKubernetes127BottlerocketLabelsUpgradeFlow(t *testing.T) {
provider := bottlerocket127ProviderWithLabels(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runLabelsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)),
api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)),
api.WithWorkerNodeGroup(worker2),
api.WithControlPlaneLabel(cpKey1, cpVal1),
),
)
}
// Multicluster
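// Multicluster tests create a management cluster and a workload cluster from
// the same provider and run the workload cluster lifecycle flow, with or
// without GitOps.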
func TestVSphereKubernetes127MulticlusterWorkloadCluster(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterFlow(test)
}
func TestVSphereUpgradeMulticlusterWorkloadClusterWithGithubFlux(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewMulticlusterE2ETest(
t,
framework.NewClusterE2ETest(
t,
provider,
framework.WithFluxGithub(),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube126),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
framework.NewClusterE2ETest(
t,
provider,
framework.WithFluxGithub(),
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube126),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
),
)
runWorkloadClusterFlowWithGitOps(
test,
framework.WithClusterUpgradeGit(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithControlPlaneCount(3),
api.WithWorkerNodeCount(3),
),
provider.WithProviderUpgradeGit(
provider.Ubuntu127Template(),
),
)
}
// OIDC
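// OIDC tests configure an external OIDC identity provider via
// framework.WithOIDC() and verify API server authentication through it in
// runOIDCFlow.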
func TestVSphereKubernetes123OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestVSphereKubernetes124OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestVSphereKubernetes125OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestVSphereKubernetes126OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestVSphereKubernetes127OIDC(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runOIDCFlow(test)
}
func TestVSphereKubernetes126To127OIDCUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithOIDC(),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFlowWithOIDC(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Ubuntu127Template()),
)
}
// Proxy config
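// These tests place the cluster machines on a private network and route
// outbound traffic through a proxy; framework.VsphereProxyRequiredEnvVars
// presumably lists the proxy-related environment variables (endpoint and
// no-proxy values) that must be set for the run.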
func TestVSphereKubernetes127UbuntuProxyConfigFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127(),
framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithProxy(framework.VsphereProxyRequiredEnvVars),
)
runProxyConfigFlow(test)
}
func TestVSphereKubernetes127BottlerocketProxyConfigFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127(),
framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithProxy(framework.VsphereProxyRequiredEnvVars),
)
runProxyConfigFlow(test)
}
// Registry mirror
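// The registry mirror tests create clusters that pull images through a
// private mirror rather than public registries, covering three variants of
// mirror trust: skipping TLS verification, trusting an explicit CA
// certificate, and authenticating with credentials.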
func TestVSphereKubernetes127UbuntuRegistryMirrorInsecureSkipVerify(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorInsecureSkipVerify(constants.VSphereProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestVSphereKubernetes127UbuntuRegistryMirrorAndCert(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestVSphereKubernetes127BottlerocketRegistryMirrorAndCert(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestVSphereKubernetes127UbuntuAuthenticatedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName),
)
runRegistryMirrorConfigFlow(test)
}
func TestVSphereKubernetes127BottlerocketAuthenticatedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName),
)
runRegistryMirrorConfigFlow(test)
}
// Clone mode
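// These tests pin the vSphere clone mode explicitly. A full clone copies the
// template disk, so the machine disk can be grown (30 GiB here); a linked
// clone shares the template's snapshot-backed disk, so its size presumably
// has to match the template. Either way, runVSphereCloneModeFlow (defined at
// the bottom of this file) asserts that the CAPI machines report the
// requested disk size.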
func TestVSphereKubernetes127FullClone(t *testing.T) {
diskSize := 30
vsphere := framework.NewVSphere(t,
framework.WithUbuntu127(),
framework.WithFullCloneMode(),
framework.WithDiskGiBForAllMachines(diskSize),
)
test := framework.NewClusterE2ETest(
t,
vsphere,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
)
runVSphereCloneModeFlow(test, vsphere, diskSize)
}
func TestVSphereKubernetes127LinkedClone(t *testing.T) {
diskSize := 20
vsphere := framework.NewVSphere(t,
framework.WithUbuntu127(),
framework.WithLinkedCloneMode(),
framework.WithDiskGiBForAllMachines(diskSize),
)
test := framework.NewClusterE2ETest(
t,
vsphere,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
)
runVSphereCloneModeFlow(test, vsphere, diskSize)
}
func TestVSphereKubernetes127BottlerocketFullClone(t *testing.T) {
diskSize := 30
vsphere := framework.NewVSphere(t,
framework.WithBottleRocket127(),
framework.WithFullCloneMode(),
framework.WithDiskGiBForAllMachines(diskSize),
)
test := framework.NewClusterE2ETest(
t,
vsphere,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
)
runVSphereCloneModeFlow(test, vsphere, diskSize)
}
func TestVSphereKubernetes127BottlerocketLinkedClone(t *testing.T) {
diskSize := 22
vsphere := framework.NewVSphere(t,
framework.WithBottleRocket127(),
framework.WithLinkedCloneMode(),
framework.WithDiskGiBForAllMachines(diskSize),
)
test := framework.NewClusterE2ETest(
t,
vsphere,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
)
runVSphereCloneModeFlow(test, vsphere, diskSize)
}
// Simple flow
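// These tests exercise the minimal lifecycle (generate a config, create the
// cluster, delete it) across the supported OS families and Kubernetes
// versions, with no upgrade or additional validation beyond what
// runSimpleFlow performs.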
func TestVSphereKubernetes123SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes124SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes125SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes126SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes127SimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes123RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithRedHat123VSphere()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes124RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithRedHat124VSphere()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes125RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithRedHat125VSphere()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes126RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithRedHat126VSphere()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes127RedHatSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithRedHat127VSphere()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes123ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(5)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes123DifferentNamespaceSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu123(), framework.WithVSphereFillers(api.WithVSphereConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithWorkerNodeCount(5)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes127BottleRocketDifferentNamespaceSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithBottleRocket127(),
framework.WithVSphereFillers(api.WithVSphereConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)),
)
runSimpleFlow(test)
}
func TestVSphereKubernetes127CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
)
runSimpleFlow(test)
}
// NTP Servers test
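// These tests configure NTP servers on all machines and deliberately clear
// the SSH authorized key (see the fillers below), presumably so that key
// generation is exercised as well; runNTPFlow is expected to verify the NTP
// configuration on the nodes for the given OS family.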
func TestVSphereKubernetes127BottleRocketWithNTP(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(
t, framework.WithBottleRocket127(),
framework.WithNTPServersForAllMachines(),
framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty
),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runNTPFlow(test, v1alpha1.Bottlerocket)
}
func TestVSphereKubernetes127UbuntuWithNTP(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(
t, framework.WithUbuntu127(),
framework.WithNTPServersForAllMachines(),
framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty
),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runNTPFlow(test, v1alpha1.Ubuntu)
}
// Bottlerocket Configuration test
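// Verifies that Bottlerocket-specific Kubernetes settings supplied through
// the machine configs reach the nodes; runBottlerocketConfigurationFlow is
// expected to assert those settings on the provisioned machines.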
func TestVSphereKubernetes127BottlerocketWithBottlerocketKubernetesSettings(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(
t, framework.WithBottleRocket127(),
framework.WithBottlerocketKubernetesSettingsForAllMachines(),
framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty
),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
runBottlerocketConfigurationFlow(test)
}
// Stacked etcd
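// With a stacked topology, etcd runs on the control plane machines instead of
// on dedicated external etcd VMs, which is why these tests use three control
// plane nodes: that preserves etcd quorum during rollouts.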
func TestVSphereKubernetes123StackedEtcdUbuntu(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu123()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
func TestVSphereKubernetes124StackedEtcdUbuntu(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu124()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
func TestVSphereKubernetes125StackedEtcdUbuntu(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu125()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
func TestVSphereKubernetes126StackedEtcdUbuntu(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu126()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
func TestVSphereKubernetes127StackedEtcdUbuntu(t *testing.T) {
test := framework.NewClusterE2ETest(t,
framework.NewVSphere(t, framework.WithUbuntu127()),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(3)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()))
runStackedEtcdFlow(test)
}
// Taints
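// The taints tests start from worker node groups created with NoSchedule and
// PreferNoSchedule taints (see the provider constructors at the bottom of
// this file) and upgrade the cluster to mutate them: two groups move to
// NoExecute, one group has its taints cleared, and the control plane gains a
// PreferNoSchedule taint.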
func TestVSphereKubernetes127UbuntuTaintsUpgradeFlow(t *testing.T) {
provider := ubuntu127ProviderWithTaints(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runTaintsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker2, api.WithNoTaints()),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
func TestVSphereKubernetes127BottlerocketTaintsUpgradeFlow(t *testing.T) {
provider := bottlerocket127ProviderWithTaints(t)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runTaintsUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup(worker2, api.WithNoTaints()),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
// Upgrade
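// The upgrade tests follow a single pattern: create a cluster at version N,
// then upgrade by bumping the Kubernetes version in the spec and swapping in
// the matching OS template through WithProviderUpgrade (templates are set
// explicitly so they are not auto-imported), or by scaling control plane and
// worker counts while keeping the version fixed.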
func TestVSphereKubernetes123UbuntuTo124Upgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu123())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube124,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
provider.WithProviderUpgrade(provider.Ubuntu124Template()),
)
}
func TestVSphereKubernetes124UbuntuTo125Upgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu124())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube125,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)),
provider.WithProviderUpgrade(provider.Ubuntu125Template()),
)
}
func TestVSphereKubernetes125UbuntuTo126Upgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu125())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube126,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
provider.WithProviderUpgrade(provider.Ubuntu126Template()),
)
}
func TestVSphereKubernetes126UbuntuTo127Upgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Ubuntu127Template()),
)
}
func TestVSphereKubernetes126UbuntuTo127UpgradeCiliumPolicyEnforcementMode(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
provider.WithProviderUpgrade(provider.Ubuntu127Template()),
)
}
func TestVSphereKubernetes126UbuntuTo127MultipleFieldsUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(
provider.Ubuntu127Template(),
api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar),
api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate),
api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar),
api.WithFolderForAllMachines(vsphereFolderUpdateVar),
// Uncomment once we support tests with multiple machine configs
/*api.WithWorkloadVMsNumCPUs(vsphereWlVmNumCpuUpdateVar),
api.WithWorkloadVMsMemoryMiB(vsphereWlVmMemoryUpdate),
api.WithWorkloadDiskGiB(vsphereWlDiskGiBUpdate),*/
// Uncomment the network field once upgrade starts working with it
// api.WithNetwork(vsphereNetwork2UpdateVar),
),
)
}
func TestVSphereKubernetes127UbuntuControlPlaneNodeUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithControlPlaneCount(3)),
)
}
func TestVSphereKubernetes127UbuntuWorkerNodeUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu127())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)),
)
}
func TestVSphereKubernetes126BottlerocketTo127Upgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithBottleRocket126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Bottlerocket127Template()),
)
}
func TestVSphereKubernetes126BottlerocketTo127MultipleFieldsUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithBottleRocket126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(
provider.Bottlerocket127Template(),
api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar),
api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate),
api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar),
api.WithFolderForAllMachines(vsphereFolderUpdateVar),
// Uncomment once we support tests with multiple machine configs
/*api.WithWorkloadVMsNumCPUs(vsphereWlVmNumCpuUpdateVar),
api.WithWorkloadVMsMemoryMiB(vsphereWlVmMemoryUpdate),
api.WithWorkloadDiskGiB(vsphereWlDiskGiBUpdate),*/
// Uncomment the network field once upgrade starts working with it
// api.WithNetwork(vsphereNetwork2UpdateVar),
),
)
}
func TestVSphereKubernetes127BottlerocketControlPlaneNodeUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithBottleRocket127())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithControlPlaneCount(3)),
)
}
func TestVSphereKubernetes127BottlerocketWorkerNodeUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithBottleRocket127())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)),
)
}
func TestVSphereKubernetes126UbuntuTo127StackedEtcdUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Ubuntu127Template()),
)
}
func TestVSphereKubernetes126BottlerocketTo127StackedEtcdUpgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithBottleRocket126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithStackedEtcdTopology()),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
provider.WithProviderUpgrade(provider.Bottlerocket127Template()),
)
}
func TestVSphereKubernetes126UbuntuTo127UpgradeWithCheckpoint(t *testing.T) {
var clusterOpts []framework.ClusterE2ETestOpt
var clusterOpts2 []framework.ClusterE2ETestOpt
provider := framework.NewVSphere(t, framework.WithUbuntu126())
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
	// First attempt: point every machine at an invalid resource pool so the
	// upgrade fails and leaves a checkpoint behind (VMs are kept for the retry).
	clusterOpts = append(clusterOpts,
		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
		framework.ExpectFailure(true),
		provider.WithProviderUpgrade(provider.Ubuntu127Template(), api.WithResourcePoolForAllMachines(vsphereInvalidResourcePoolUpdateVar)),
		framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"),
		framework.WithEnvVar(framework.CleanupVmsVar, "false"),
	)
	commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")}
	// Retry: restore the valid resource pool. This appends to clusterOpts and
	// relies on later options taking precedence, so the upgrade resumes from
	// the checkpoint and is expected to succeed.
	clusterOpts2 = append(clusterOpts,
		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
		framework.ExpectFailure(false),
		provider.WithProviderUpgrade(provider.Ubuntu127Template(), api.WithResourcePoolForAllMachines(os.Getenv(vsphereResourcePoolVar))),
		framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"),
		framework.WithEnvVar(framework.CleanupVmsVar, "true"),
	)
runUpgradeFlowWithCheckpoint(
test,
v1alpha1.Kube127,
clusterOpts,
clusterOpts2,
commandOpts,
)
}
func TestVSphereKubernetes124BottlerocketUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
framework.WithBottlerocketFromRelease(release, v1alpha1.Kube124),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
provider.WithProviderUpgrade(
provider.Bottlerocket124Template(), // Set the template so it doesn't get autoimported
),
)
}
func TestVSphereKubernetes124UbuntuUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu),
),
framework.WithUbuntuForRelease(release, v1alpha1.Kube124),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
provider.WithProviderUpgrade(
provider.Ubuntu124Template(), // Set the template so it doesn't get autoimported
),
)
}
func TestVSphereKubernetes126ManagementClusterUpgradeFromLatestSideEffects(t *testing.T) {
provider := framework.NewVSphere(t)
runTestManagementClusterUpgradeSideEffects(t, provider, v1alpha1.Ubuntu, v1alpha1.Kube126)
}
func TestVSphereKubernetes124UbuntuUpgradeFromLatestMinorReleaseAlwaysNetworkPolicy(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu),
),
framework.WithUbuntuForRelease(release, v1alpha1.Kube124),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)),
provider.WithProviderUpgrade(
provider.Ubuntu124Template(), // Set the template so it doesn't get autoimported
),
)
}
func TestVSphereKubernetes123To124UbuntuUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu),
),
framework.WithUbuntuForRelease(release, v1alpha1.Kube123),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube123)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube124,
provider.WithProviderUpgrade(
provider.Ubuntu124Template(), // Set the template so it doesn't get autoimported
),
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
)
}
func TestVSphereKubernetes124To125UbuntuUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu),
),
framework.WithUbuntuForRelease(release, v1alpha1.Kube124),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube125,
provider.WithProviderUpgrade(
provider.Ubuntu125Template(), // Set the template so it doesn't get autoimported
),
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)),
)
}
func TestVSphereKubernetes125To126UbuntuUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu),
),
framework.WithUbuntuForRelease(release, v1alpha1.Kube125),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube126,
provider.WithProviderUpgrade(
provider.Ubuntu126Template(), // Set the template so it doesn't get autoimported
),
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
)
}
func TestVSphereKubernetes126To127UbuntuUpgradeFromLatestMinorRelease(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t,
framework.WithVSphereFillers(
api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu),
),
framework.WithUbuntuForRelease(release, v1alpha1.Kube126),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
)
runUpgradeFromReleaseFlow(
test,
release,
v1alpha1.Kube127,
provider.WithProviderUpgrade(
provider.Ubuntu127Template(), // Set the template so it doesn't get autoimported
),
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)),
)
}
func TestVSphereKubernetes127BottlerocketAndRemoveWorkerNodeGroups(t *testing.T) {
provider := framework.NewVSphere(t,
framework.WithVSphereWorkerNodeGroup(
"worker-1",
framework.WithWorkerNodeGroup("workers-1", api.WithCount(2)),
),
framework.WithVSphereWorkerNodeGroup(
"worker-2",
framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)),
),
framework.WithBottleRocket127(),
)
test := framework.NewClusterE2ETest(
t,
provider,
framework.WithClusterFiller(
api.WithKubernetesVersion(v1alpha1.Kube127),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
)
runSimpleUpgradeFlow(
test,
v1alpha1.Kube127,
framework.WithClusterUpgrade(
api.RemoveWorkerNodeGroup("workers-2"),
api.WithWorkerNodeGroup("workers-1", api.WithCount(1)),
),
provider.WithNewVSphereWorkerNodeGroup(
"worker-1",
framework.WithWorkerNodeGroup(
"workers-3",
api.WithCount(1),
),
),
)
}
func TestVSphereKubernetes124UbuntuUpgradeAndRemoveWorkerNodeGroupsAPI(t *testing.T) {
provider := framework.NewVSphere(t)
test := framework.NewClusterE2ETest(
t, provider,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube124),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
provider.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(2))),
provider.WithNewWorkerNodeGroup("worker-2", framework.WithWorkerNodeGroup("worker-2", api.WithCount(1))),
provider.WithNewWorkerNodeGroup("worker-3", framework.WithWorkerNodeGroup("worker-3", api.WithCount(1), api.WithLabel("tier", "frontend"))),
provider.WithUbuntu124(),
)
runUpgradeFlowWithAPI(
test,
api.ClusterToConfigFiller(
api.RemoveWorkerNodeGroup("worker-2"),
api.WithWorkerNodeGroup("worker-1", api.WithCount(1)),
api.RemoveWorkerNodeGroup("worker-3"),
),
// Re-adding with no labels and a taint
provider.WithWorkerNodeGroupConfiguration("worker-3", framework.WithWorkerNodeGroup("worker-3", api.WithCount(1), api.WithTaint(framework.NoScheduleTaint()))),
provider.WithWorkerNodeGroupConfiguration("worker-1", framework.WithWorkerNodeGroup("worker-4", api.WithCount(1))),
)
}
func TestVSphereKubernetes123to124UpgradeFromLatestMinorReleaseBottleRocketAPI(t *testing.T) {
release := latestMinorRelease(t)
provider := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, provider,
)
managementCluster.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
managementCluster.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
),
api.VSphereToConfigFiller(
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
provider.WithBottleRocketForRelease(release, v1alpha1.Kube123),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
wc := framework.NewClusterE2ETest(
t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
)
wc.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release))
wc.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithKubernetesVersion(v1alpha1.Kube123),
api.WithManagementCluster(managementCluster.ClusterName),
),
api.VSphereToConfigFiller(
api.WithOsFamilyForAllMachines(v1alpha1.Bottlerocket),
),
provider.WithBottleRocketForRelease(release, v1alpha1.Kube123),
)
test.WithWorkloadClusters(wc)
runMulticlusterUpgradeFromReleaseFlowAPI(
test,
release,
api.JoinClusterConfigFillers(
provider.WithBottleRocket124(),
api.VSphereToConfigFiller(
provider.Bottlerocket124Template(), // Set the template so it doesn't get autoimported
),
),
)
}
// Workload API
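// The "API" tests below manage workload clusters declaratively: the
// management cluster is created with the CLI, while workload clusters are
// applied as manifests or pushed to a Flux-backed Git repository, waited on
// through their kubeconfigs, validated, and finally deleted with kubectl or
// Git instead of the CLI.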
func TestVSphereMulticlusterWorkloadClusterAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu123(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu123(),
),
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
),
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu125(),
),
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu126(),
),
)
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestVSphereUpgradeLabelsTaintsUbuntuAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(2), api.WithLabel("key1", "val2"), api.WithTaint(framework.NoScheduleTaint()))),
vsphere.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(1))),
vsphere.WithNewWorkerNodeGroup("worker-2", framework.WithWorkerNodeGroup("worker-2", api.WithCount(1), api.WithLabel("key2", "val2"), api.WithTaint(framework.PreferNoScheduleTaint()))),
vsphere.WithUbuntu124(),
),
)
runWorkloadClusterUpgradeFlowAPI(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeGroup("worker-0", api.WithLabel("key1", "val1"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-1", api.WithLabel("key2", "val2"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-2", api.WithNoTaints()),
api.WithControlPlaneLabel("cpKey1", "cpVal1"),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
func TestVSphereUpgradeWorkerNodeGroupsUbuntuAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1))),
vsphere.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(1))),
vsphere.WithUbuntu124(),
),
)
runWorkloadClusterUpgradeFlowAPI(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeGroup("worker-0", api.WithCount(2)),
api.RemoveWorkerNodeGroup("worker-1"),
),
vsphere.WithWorkerNodeGroupConfiguration("worker-1", framework.WithWorkerNodeGroup("worker-2", api.WithCount(1))),
)
}
func TestVSphereMulticlusterWorkloadClusterGitHubFluxAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
framework.WithFluxGithubConfig(),
vsphere.WithUbuntu124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu123(),
),
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithExternalEtcdTopology(1),
),
vsphere.WithUbuntu124(),
),
)
test.CreateManagementCluster()
test.RunInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestVSphereUpgradeKubernetes123to124UbuntuWorkloadClusterAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1))),
vsphere.WithUbuntu123(),
),
)
runWorkloadClusterUpgradeFlowAPI(test,
vsphere.WithUbuntu124(),
)
}
func TestVSphereCiliumUbuntuAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere,
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1))),
vsphere.WithUbuntu124(),
),
)
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.UpdateClusterConfig(
api.ClusterToConfigFiller(
api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways),
),
)
wc.ApplyClusterManifest()
wc.ValidateClusterState()
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestVSphereUpgradeLabelsTaintsBottleRocketGitHubFluxAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithBottleRocket124(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(2), api.WithLabel("key1", "val2"), api.WithTaint(framework.NoScheduleTaint()))),
vsphere.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(1))),
vsphere.WithNewWorkerNodeGroup("worker-2", framework.WithWorkerNodeGroup("worker-2", api.WithCount(1), api.WithLabel("key2", "val2"), api.WithTaint(framework.PreferNoScheduleTaint()))),
vsphere.WithBottleRocket124(),
),
)
runWorkloadClusterUpgradeFlowAPIWithFlux(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeGroup("worker-0", api.WithLabel("key1", "val1"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-1", api.WithLabel("key2", "val2"), api.WithTaint(framework.NoExecuteTaint())),
api.WithWorkerNodeGroup("worker-2", api.WithNoTaints()),
api.WithControlPlaneLabel("cpKey1", "cpVal1"),
api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}),
),
)
}
func TestVSphereUpgradeWorkerNodeGroupsUbuntuGitHubFluxAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1))),
vsphere.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(1))),
vsphere.WithUbuntu124(),
),
)
runWorkloadClusterUpgradeFlowAPIWithFlux(test,
api.ClusterToConfigFiller(
api.WithWorkerNodeGroup("worker-0", api.WithCount(2)),
api.RemoveWorkerNodeGroup("worker-1"),
),
vsphere.WithWorkerNodeGroupConfiguration("worker-1", framework.WithWorkerNodeGroup("worker-2", api.WithCount(1))),
)
}
func TestVSphereUpgradeKubernetesCiliumUbuntuGitHubFluxAPI(t *testing.T) {
vsphere := framework.NewVSphere(t)
managementCluster := framework.NewClusterE2ETest(
t, vsphere, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithControlPlaneCount(1),
api.WithWorkerNodeCount(1),
api.WithStackedEtcdTopology(),
),
vsphere.WithUbuntu124(),
framework.WithFluxGithubConfig(),
)
test := framework.NewMulticlusterE2ETest(t, managementCluster)
test.WithWorkloadClusters(
framework.NewClusterE2ETest(
t, vsphere, framework.WithClusterName(test.NewWorkloadClusterName()),
).WithClusterConfig(
api.ClusterToConfigFiller(
api.WithManagementCluster(managementCluster.ClusterName),
api.WithExternalEtcdTopology(1),
api.WithControlPlaneCount(1),
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
),
vsphere.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1))),
vsphere.WithUbuntu123(),
),
)
test.CreateManagementCluster()
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
test.PushWorkloadClusterToGit(wc)
wc.WaitForKubeconfig()
wc.ValidateClusterState()
test.PushWorkloadClusterToGit(wc,
api.ClusterToConfigFiller(
api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways),
),
vsphere.WithUbuntu124(),
)
wc.ValidateClusterState()
test.DeleteWorkloadClusterFromGit(wc)
wc.ValidateClusterDelete()
})
test.ManagementCluster.StopIfFailed()
test.DeleteManagementCluster()
}
func TestVSphereKubernetes127UbuntuAirgappedRegistryMirror(t *testing.T) {
test := framework.NewClusterE2ETest(
t,
framework.NewVSphere(t, framework.WithUbuntu127(), framework.WithPrivateNetwork()),
framework.WithClusterFiller(api.WithControlPlaneCount(1)),
framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)),
framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName),
)
runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16")
}
func ubuntu127ProviderWithLabels(t *testing.T) *framework.VSphere {
return framework.NewVSphere(t,
framework.WithVSphereWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(worker0, api.WithCount(2),
api.WithLabel(key1, val2)),
),
framework.WithVSphereWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithVSphereWorkerNodeGroup(
worker2,
framework.WithWorkerNodeGroup(worker2, api.WithCount(1),
api.WithLabel(key2, val2)),
),
framework.WithUbuntu127(),
)
}
func bottlerocket127ProviderWithLabels(t *testing.T) *framework.VSphere {
return framework.NewVSphere(t,
framework.WithVSphereWorkerNodeGroup(
worker0,
framework.WithWorkerNodeGroup(worker0, api.WithCount(2),
api.WithLabel(key1, val2)),
),
framework.WithVSphereWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithVSphereWorkerNodeGroup(
worker2,
framework.WithWorkerNodeGroup(worker2, api.WithCount(1),
api.WithLabel(key2, val2)),
),
framework.WithBottleRocket127(),
)
}
func ubuntu127ProviderWithTaints(t *testing.T) *framework.VSphere {
return framework.NewVSphere(t,
framework.WithVSphereWorkerNodeGroup(
worker0,
framework.NoScheduleWorkerNodeGroup(worker0, 2),
),
framework.WithVSphereWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithVSphereWorkerNodeGroup(
worker2,
framework.PreferNoScheduleWorkerNodeGroup(worker2, 1),
),
framework.WithUbuntu127(),
)
}
func bottlerocket127ProviderWithTaints(t *testing.T) *framework.VSphere {
return framework.NewVSphere(t,
framework.WithVSphereWorkerNodeGroup(
worker0,
framework.NoScheduleWorkerNodeGroup(worker0, 2),
),
framework.WithVSphereWorkerNodeGroup(
worker1,
framework.WithWorkerNodeGroup(worker1, api.WithCount(1)),
),
framework.WithVSphereWorkerNodeGroup(
worker2,
framework.PreferNoScheduleWorkerNodeGroup(worker2, 1),
),
framework.WithBottleRocket127(),
)
}
func runVSphereCloneModeFlow(test *framework.ClusterE2ETest, vsphere *framework.VSphere, diskSize int) {
test.GenerateClusterConfig()
test.CreateCluster()
vsphere.ValidateNodesDiskGiB(test.GetCapiMachinesForCluster(test.ClusterName), diskSize)
test.DeleteCluster()
}
| 3,052 |
eks-anywhere | aws | Go | package main
import (
"os"
"github.com/aws/eks-anywhere-test-tool/cmd"
)
func main() {
	// Exit zero on success; any error from the root command yields a non-zero code.
	if err := cmd.Execute(); err != nil {
		os.Exit(-1)
	}
}
| 15 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var e2eCmd = &cobra.Command{
Use: "e2e",
Short: "e2e test interaction",
Long: "Interact with and debug end-to-end tests",
}
func init() {
rootCmd.AddCommand(e2eCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
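// e2eFetchOptions carries the flag values shared by the fetch subcommands: a
// specific CodeBuild build ID, a CodeBuild project to select builds from, an
// optional list of test names to filter on, a log destination, and whether to
// fetch all artifacts.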
type e2eFetchOptions struct {
forBuildId string
forProject string
tests []string
logTo string
fetchAll bool
}
var e2eFetchCommand = &cobra.Command{
Use: "fetch",
Short: "e2e fetch command",
Long: "This command fetches various artifacts and logs from the e2e tests",
	RunE: func(cmd *cobra.Command, args []string) error {
		// "fetch" only groups subcommands; nudge the user toward them.
		fmt.Println("fetch requires a subcommand: artifacts, logs or providerProxyLogs")
		return cmd.Help()
	},
}
func init() {
e2eCmd.AddCommand(e2eFetchCommand)
err := viper.BindPFlags(e2eFetchCommand.Flags())
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
}
| 36 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"os"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere-test-tool/pkg/artifacts"
"github.com/aws/eks-anywhere-test-tool/pkg/awsprofiles"
"github.com/aws/eks-anywhere-test-tool/pkg/cloudwatch"
"github.com/aws/eks-anywhere-test-tool/pkg/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere-test-tool/pkg/fileutils"
"github.com/aws/eks-anywhere-test-tool/pkg/filewriter"
"github.com/aws/eks-anywhere-test-tool/pkg/s3"
"github.com/aws/eks-anywhere/pkg/logger"
)
var fa = &e2eFetchOptions{}
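// Example invocations, assuming the binary is named after the module
// (eks-anywhere-test-tool):
//
//	eks-anywhere-test-tool e2e fetch artifacts --buildId <codebuild-build-id>
//	eks-anywhere-test-tool e2e fetch artifacts --project <codebuild-project> --all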
var e2eFetchArtifactsCommand = &cobra.Command{
Use: "artifacts",
Short: "fetch artifacts",
Long: "This command fetches the artifacts associated with a given test execution",
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println("Let's fetch some artifacts! \U0001FAA3")
_, present := os.LookupEnv(constants.E2eArtifactsBucketEnvVar)
if !present {
logger.MarkFail("E2E Test artifact bucket env var is not set!", "var", constants.E2eArtifactsBucketEnvVar)
return fmt.Errorf("no e2e bucket env var set")
}
buildAccountCodebuild, err := codebuild.New(awsprofiles.BuildAccount)
if err != nil {
return fmt.Errorf("creating codebuild client: %v", err)
}
testAccountS3, err := s3.New(awsprofiles.TestAccount)
if err != nil {
return fmt.Errorf("creating s3 client: %v", err)
}
		dir := fileutils.GenOutputDirName("artifacts")
		writer := filewriter.NewWriter(dir)
buildAccountCw, err := cloudwatch.New(awsprofiles.BuildAccount)
if err != nil {
return fmt.Errorf("creating cloudwatch logs client: %v", err)
}
artifactFetcher := artifacts.New(testAccountS3, buildAccountCodebuild, writer, buildAccountCw)
var opts []artifacts.FetchArtifactsOpt
if fa.forBuildId != "" {
opts = append(opts, artifacts.WithCodebuildBuild(fa.forBuildId))
}
if fa.forProject != "" {
opts = append(opts, artifacts.WithCodebuildProject(fa.forProject))
}
if fa.fetchAll {
opts = append(opts, artifacts.WithAllArtifacts())
}
return artifactFetcher.FetchArtifacts(opts...)
},
}
func init() {
e2eFetchCommand.AddCommand(e2eFetchArtifactsCommand)
e2eFetchArtifactsCommand.Flags().StringVar(&fa.forBuildId, "buildId", "", "Build ID to fetch artifacts for")
e2eFetchArtifactsCommand.Flags().StringVar(&fa.forProject, "project", "", "Project to fetch builds from")
e2eFetchArtifactsCommand.Flags().BoolVar(&fa.fetchAll, "all", false, "Fetch all artifacts")
err := viper.BindPFlags(e2eFetchArtifactsCommand.Flags())
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
}
| 87 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere-test-tool/pkg/awsprofiles"
"github.com/aws/eks-anywhere-test-tool/pkg/cloudwatch"
"github.com/aws/eks-anywhere-test-tool/pkg/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/logfetcher"
)
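// logToStdout is the sentinel accepted by the --log-to flag; when set, fetched
// logs are printed to standard output instead of the default destination.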
const logToStdout = "stdout"
var fl = &e2eFetchOptions{}
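// Example invocation, under the same binary-name assumption as the artifacts
// command:
//
//	eks-anywhere-test-tool e2e fetch logs --buildId <id> --tests TestVSphereKubernetes127SimpleFlow --log-to stdout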
var e2eFetchLogsCommand = &cobra.Command{
Use: "logs",
Short: "fetch logs",
	Long:  "This command fetches the CloudWatch logs associated with a given test execution",
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println("Let's fetch some logs! \U0001FAB5")
buildAccountCodebuild, err := codebuild.New(awsprofiles.BuildAccount)
if err != nil {
return fmt.Errorf("creating codebuild client: %v", err)
}
buildAccountCw, err := cloudwatch.New(awsprofiles.BuildAccount)
if err != nil {
return fmt.Errorf("creating cloudwatch logs client: %v", err)
}
testAccountCw, err := cloudwatch.New(awsprofiles.TestAccount)
if err != nil {
return fmt.Errorf("instantiating CW profile: %v", err)
}
var fetcherOpts []logfetcher.LogFetcherOpt
if fl.tests != nil {
fetcherOpts = append(fetcherOpts, logfetcher.WithTestFilterByName(fl.tests))
}
if fl.logTo == logToStdout {
fetcherOpts = append(fetcherOpts, logfetcher.WithLogStdout())
}
fetcher := logfetcher.New(buildAccountCw, testAccountCw, buildAccountCodebuild, fetcherOpts...)
var opts []logfetcher.FetchLogsOpt
if fl.forBuildId != "" {
opts = append(opts, logfetcher.WithCodebuildBuild(fl.forBuildId))
}
if fl.forProject != "" {
opts = append(opts, logfetcher.WithCodebuildProject(fl.forProject))
}
return fetcher.FetchLogs(opts...)
},
}
func init() {
e2eFetchCommand.AddCommand(e2eFetchLogsCommand)
e2eFetchLogsCommand.Flags().StringVar(&fl.forBuildId, "buildId", "", "Build ID to fetch logs for")
e2eFetchLogsCommand.Flags().StringVar(&fl.forProject, "project", "", "Project to fetch builds from")
e2eFetchLogsCommand.Flags().StringSliceVar(&fl.tests, "tests", nil, "Filter tests by name")
e2eFetchLogsCommand.Flags().StringVar(&fl.logTo, "log-to", "", "Log output to")
err := viper.BindPFlags(e2eFetchLogsCommand.Flags())
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
}
| 75 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere-test-tool/pkg/awsprofiles"
"github.com/aws/eks-anywhere-test-tool/pkg/cloudwatch"
"github.com/aws/eks-anywhere-test-tool/pkg/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/providerproxy"
)
type proxyLogsFetchOptions struct {
forBuildId string
forProject string
logTo string
}
var fs = &proxyLogsFetchOptions{}
var e2eFetchProxyLogsCommand = &cobra.Command{
Use: "providerProxyLogs",
Short: "fetch provider proxy logs associated with a given build execution",
Long: "This command fetches proxy logs which capture wire communication between our test clusters and the EKS-A vSphere endpoint.",
RunE: func(cmd *cobra.Command, args []string) error {
buildAccountCodebuild, err := codebuild.New(awsprofiles.BuildAccount)
if err != nil {
return fmt.Errorf("creating codebuild client: %v", err)
}
buildAccountCw, err := cloudwatch.New(awsprofiles.BuildAccount)
if err != nil {
return fmt.Errorf("creating cloudwatch logs client: %v", err)
}
testAccountCw, err := cloudwatch.New(awsprofiles.TestAccount)
if err != nil {
return fmt.Errorf("instantiating CW profile: %v", err)
}
var fetcherOpts []providerproxy.ProxyFetcherOpt
if fs.logTo == logToStdout {
fetcherOpts = append(fetcherOpts, providerproxy.WithLogStdout())
}
fetcher := providerproxy.New(buildAccountCw, testAccountCw, buildAccountCodebuild, fetcherOpts...)
var opts []providerproxy.FetchSessionOpts
if fs.forBuildId != "" {
opts = append(opts, providerproxy.WithCodebuildBuild(fs.forBuildId))
}
if fs.forProject != "" {
opts = append(opts, providerproxy.WithCodebuildProject(fs.forProject))
}
return fetcher.FetchProviderProxyLogs(opts...)
},
}
func init() {
e2eFetchCommand.AddCommand(e2eFetchProxyLogsCommand)
e2eFetchProxyLogsCommand.Flags().StringVar(&fs.forBuildId, "buildId", "", "Build ID to fetch logs for")
e2eFetchProxyLogsCommand.Flags().StringVar(&fs.forProject, "project", "", "Project to fetch builds from")
e2eFetchProxyLogsCommand.Flags().StringVar(&fs.logTo, "log-to", "", "Log output to")
err := viper.BindPFlags(e2eFetchProxyLogsCommand.Flags())
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
}
| 75 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/logger"
)
var rootCmd = &cobra.Command{
Use: "eks-a-test-tool",
Short: "Amazon EKS Anywhere Test Tooling",
Long: `Use eks-a-test-tool to evaluate EKS-A test results`,
PersistentPreRun: rootPersistentPreRun,
}
func init() {
rootCmd.PersistentFlags().IntP("verbosity", "v", 0, "Set the log level verbosity")
if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil {
log.Fatalf("failed to bind flags for root: %v", err)
}
}
func rootPersistentPreRun(cmd *cobra.Command, args []string) {
if err := initLogger(); err != nil {
log.Fatal(err)
}
}
func initLogger() error {
if err := logger.InitZap(logger.ZapOpts{
Level: viper.GetInt("verbosity"),
}); err != nil {
return fmt.Errorf("failed init zap logger in root command: %v", err)
}
return nil
}
func Execute() error {
return rootCmd.Execute()
}
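// A typical entrypoint (illustrative sketch) simply delegates to Execute:
//
//	func main() {
//		if err := cmd.Execute(); err != nil {
//			os.Exit(1)
//		}
//	}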
| 46 |
eks-anywhere | aws | Go | package artifacts
import (
"context"
"fmt"
"math/rand"
"os"
"strings"
"time"
cb "github.com/aws/aws-sdk-go/service/codebuild"
"golang.org/x/sync/errgroup"
"github.com/aws/eks-anywhere-test-tool/pkg/cloudwatch"
"github.com/aws/eks-anywhere-test-tool/pkg/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere-test-tool/pkg/filewriter"
"github.com/aws/eks-anywhere-test-tool/pkg/s3"
"github.com/aws/eks-anywhere-test-tool/pkg/testresults"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type FetchArtifactsOpt func(options *fetchArtifactConfig) (err error)
func WithCodebuildBuild(buildId string) FetchArtifactsOpt {
return func(options *fetchArtifactConfig) (err error) {
options.buildId = buildId
logger.Info("user provided build ID detected", "buildId", buildId)
return err
}
}
func WithCodebuildProject(project string) FetchArtifactsOpt {
return func(options *fetchArtifactConfig) (err error) {
options.project = project
logger.Info("user provided project ID detected", "project", project)
return err
}
}
func WithAllArtifacts() FetchArtifactsOpt {
return func(options *fetchArtifactConfig) (err error) {
options.fetchAll = true
return err
}
}
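// A minimal sketch of composing these options (assuming the clients and writer
// have already been constructed as in the fetch command):
//
//	fetcher := artifacts.New(s3Client, codebuildClient, writer, cwClient)
//	err := fetcher.FetchArtifacts(
//		artifacts.WithCodebuildProject("aws-eks-anywhere-test"),
//		artifacts.WithAllArtifacts(),
//	)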
type fetchArtifactConfig struct {
buildId string
bucket string
project string
fetchAll bool
}
type testArtifactFetcher struct {
testAccountS3Client *s3.S3
buildAccountCodebuildClient *codebuild.Codebuild
buildAccountCwClient *cloudwatch.Cloudwatch
writer filewriter.FileWriter
retrier *retrier.Retrier
}
func New(testAccountS3Client *s3.S3, buildAccountCodebuildClient *codebuild.Codebuild, writer filewriter.FileWriter, cwClient *cloudwatch.Cloudwatch) *testArtifactFetcher {
return &testArtifactFetcher{
testAccountS3Client: testAccountS3Client,
buildAccountCodebuildClient: buildAccountCodebuildClient,
writer: writer,
retrier: fileWriterRetrier(),
buildAccountCwClient: cwClient,
}
}
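// FetchArtifacts downloads build artifacts from S3. By default it uses the
// latest finished build of the default project and only keeps artifacts for
// failed tests; options can target a specific build or fetch everything.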
func (l *testArtifactFetcher) FetchArtifacts(opts ...FetchArtifactsOpt) error {
config := &fetchArtifactConfig{
bucket: os.Getenv(constants.E2eArtifactsBucketEnvVar),
project: constants.EksATestCodebuildProject,
}
for _, opt := range opts {
err := opt(config)
if err != nil {
return fmt.Errorf("failed to set options on fetch artifacts config: %v", err)
}
}
var p *cb.Build
var err error
if config.buildId == "" {
p, err = l.buildAccountCodebuildClient.FetchLatestBuildForProject(config.project)
if err != nil {
return fmt.Errorf("failed to get latest build for project: %v", err)
}
config.buildId = *p.Id
} else {
p, err = l.buildAccountCodebuildClient.FetchBuildForProject(config.buildId)
if err != nil {
return fmt.Errorf("failed to get build for project: %v", err)
}
}
g := p.Logs.GroupName
s := p.Logs.StreamName
logs, err := l.buildAccountCwClient.GetLogs(*g, *s)
if err != nil {
return fmt.Errorf("fetching cloudwatch logs: %v", err)
}
_, failedTests, err := testresults.GetFailedTests(logs)
if err != nil {
return err
}
failedTestIds := testresults.TestResultsJobIdMap(failedTests)
logger.Info("Fetching build artifacts...")
objects, err := l.testAccountS3Client.ListObjects(config.bucket, config.buildId)
if err != nil {
return fmt.Errorf("listing objects in bucket %s at key %s: %v", config.bucket, config.buildId, err)
}
logger.V(5).Info("Listed objects", "bucket", config.bucket, "prefix", config.buildId, "objects", len(objects))
errs, _ := errgroup.WithContext(context.Background())
for _, object := range objects {
if excludedKey(*object.Key) {
continue
}
obj := *object
keySplit := strings.Split(*obj.Key, "/")
_, ok := failedTestIds[keySplit[0]]
if !ok && !config.fetchAll {
continue
}
errs.Go(func() error {
logger.Info("Fetching object", "key", obj.Key, "bucket", config.bucket)
o, err := l.testAccountS3Client.GetObject(config.bucket, *obj.Key)
if err != nil {
return err
}
logger.Info("Fetched object", "key", obj.Key, "bucket", config.bucket)
logger.Info("Writing object to file", "key", obj.Key, "bucket", config.bucket)
err = l.retrier.Retry(func() error {
return l.writer.WriteTestArtifactsS3ToFile(*obj.Key, o)
})
if err != nil {
logger.Info("error occurred while writing file", "err", err)
return fmt.Errorf("writing object %s from bucket %s to file: %v", *obj.Key, config.bucket, err)
}
return nil
})
}
return errs.Wait()
}
func excludedKey(key string) bool {
excludedKeys := []string{
"/.git/",
"/oidc/",
}
excludedSuffixes := []string{
"/e2e.test",
"/eksctl-anywhere",
".csv",
}
for _, s := range excludedKeys {
if strings.Contains(key, s) {
return true
}
}
for _, s := range excludedSuffixes {
if strings.HasSuffix(key, s) {
return true
}
}
return false
}
func fileWriterRetrier() *retrier.Retrier {
return retrier.New(time.Minute, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
generator := rand.New(rand.NewSource(time.Now().UnixNano()))
minWait := 1
maxWait := 5
// wait a random 1-5 seconds (inclusive) to add jitter between retries
waitWithJitter := time.Duration(generator.Intn(maxWait-minWait+1)+minWait) * time.Second
if isTooManyOpenFilesError(err) && totalRetries < 15 {
logger.V(2).Info("Too many files open, retrying")
return true, waitWithJitter
}
return false, 0
}))
}
func isTooManyOpenFilesError(err error) bool {
return strings.Contains(err.Error(), "too many open files")
}
| 205 |
eks-anywhere | aws | Go | package awsprofiles
type EksAccount int64
const (
BuildAccount EksAccount = iota
TestAccount
)
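// ProfileName returns the AWS shared-config profile name expected on the
// local machine for the given account.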
func (s EksAccount) ProfileName() string {
switch s {
case BuildAccount:
return "eks-a-build-account"
case TestAccount:
return "eks-a-test-account"
}
return "unknown"
}
| 19 |
eks-anywhere | aws | Go | package cloudwatch
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/eks-anywhere-test-tool/pkg/awsprofiles"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
)
type Cloudwatch struct {
session *session.Session
svc *cloudwatchlogs.CloudWatchLogs
}
func New(account awsprofiles.EksAccount) (*Cloudwatch, error) {
logger.V(2).Info("creating Cloudwatch client")
sess, err := session.NewSessionWithOptions(session.Options{
Profile: account.ProfileName(),
Config: aws.Config{Region: aws.String(constants.AwsAccountRegion)},
})
if err != nil {
return nil, fmt.Errorf("setting up session: %v", err)
}
svc := cloudwatchlogs.New(sess)
logger.V(2).Info("created Cloudwatch client")
return &Cloudwatch{
session: sess,
svc: svc,
}, nil
}
func (c *Cloudwatch) GetLogs(logGroupName string, logStreamName string) ([]*cloudwatchlogs.OutputLogEvent, error) {
return c.getLogs(logGroupName, logStreamName, nil, nil)
}
func (c *Cloudwatch) GetLogsInTimeframe(logGroupName string, logStreamName string, startTime int64, endTime int64) ([]*cloudwatchlogs.OutputLogEvent, error) {
return c.getLogs(logGroupName, logStreamName, &startTime, &endTime)
}
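// getLogs pages through GetLogEvents with the forward token, accumulating
// events until the token stops advancing (i.e. the stream is exhausted).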
func (c *Cloudwatch) getLogs(logGroupName string, logStreamName string, startTime *int64, endTime *int64) ([]*cloudwatchlogs.OutputLogEvent, error) {
var nextToken *string
var output []*cloudwatchlogs.OutputLogEvent
for {
l, err := c.getLogSegment(logGroupName, logStreamName, startTime, endTime, nextToken)
if err != nil {
if isInvalidParameterError(err) {
// the log stream does not exist; return whatever has been collected so far
logger.Info("log stream does not exist, stopping", "logStream", logStreamName)
break
}
logger.Info("error fetching cloudwatch logs", "group", logGroupName, "stream", logStreamName, "err", err)
return nil, err
}
if l.NextForwardToken == nil || (nextToken != nil && *nextToken == *l.NextForwardToken) {
logger.Info("finished fetching logs", "logGroup", logGroupName, "logStream", logStreamName)
logger.V(3).Info("token comparison", "nextToken", nextToken, "nextForwardToken", l.NextForwardToken)
break
}
nextToken = l.NextForwardToken
logger.Info("fetched logs", "logGroup", logGroupName, "logStream", logStreamName, "events", len(l.Events))
logger.V(3).Info("token comparison", "nextToken", nextToken, "nextForwardToken", l.NextForwardToken)
output = append(output, l.Events...)
}
return output, nil
}
func (c *Cloudwatch) getLogSegment(logGroupName string, logStreamName string, startTime *int64, endTime *int64, nextToken *string) (*cloudwatchlogs.GetLogEventsOutput, error) {
input := &cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(logGroupName),
LogStreamName: aws.String(logStreamName),
NextToken: nextToken,
StartFromHead: aws.Bool(true),
}
if startTime != nil {
input.StartTime = startTime
}
if endTime != nil {
input.EndTime = endTime
}
return c.svc.GetLogEvents(input)
}
func isInvalidParameterError(err error) bool {
if awsErr, ok := err.(awserr.Error); ok {
return awsErr.Code() == cloudwatchlogs.ErrCodeInvalidParameterException
}
return false
}
| 100 |
eks-anywhere | aws | Go | package codebuild
import (
"errors"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/awsprofiles"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
)
type Codebuild struct {
session *session.Session
svc *codebuild.CodeBuild
}
func New(account awsprofiles.EksAccount) (*Codebuild, error) {
logger.V(2).Info("creating codebuild client")
sess, err := session.NewSessionWithOptions(session.Options{
Profile: account.ProfileName(),
Config: aws.Config{
Region: aws.String(constants.AwsAccountRegion),
CredentialsChainVerboseErrors: aws.Bool(true),
},
})
if err != nil {
return nil, fmt.Errorf("got error when setting up session: %v", err)
}
svc := codebuild.New(sess)
logger.V(2).Info("created codebuild client")
return &Codebuild{
session: sess,
svc: svc,
}, nil
}
func (c *Codebuild) FetchBuildForProject(id string) (*codebuild.Build, error) {
return c.getBuildById(id)
}
func (c *Codebuild) FetchLatestBuildForProject(project string) (*codebuild.Build, error) {
builds, err := c.FetchBuildsForProject(project)
if err != nil {
return nil, err
}
// Find the latest build that is not in progress
for _, id := range builds.Ids {
b, err := c.getBuildById(*id)
if err != nil {
return nil, err
}
if *b.BuildStatus != codebuild.StatusTypeInProgress {
return b, nil
}
}
return nil, errors.New("no finished build found for project")
}
func (c *Codebuild) getBuildById(id string) (*codebuild.Build, error) {
i := []*string{aws.String(id)}
latestBuild, err := c.svc.BatchGetBuilds(&codebuild.BatchGetBuildsInput{Ids: i})
if err != nil {
return nil, fmt.Errorf("got an error when fetching latest build for project: %v", err)
}
if len(latestBuild.Builds) < 1 {
return nil, fmt.Errorf("no builds found with id %s", id)
}
return latestBuild.Builds[0], nil
}
func (c *Codebuild) FetchBuildsForProject(project string) (*codebuild.ListBuildsForProjectOutput, error) {
// we're using this to get the latest build, so we don't care about pagination at the moment
builds, err := c.svc.ListBuildsForProject(&codebuild.ListBuildsForProjectInput{
NextToken: nil,
ProjectName: aws.String(project),
SortOrder: aws.String(codebuild.SortOrderTypeDescending),
})
if err != nil {
return nil, fmt.Errorf("fetching builds for project %s: %v", project, err)
}
return builds, nil
}
| 90 |
eks-anywhere | aws | Go | package constants
const (
AwsAccountRegion = "us-west-2"
BuildDescriptionFile = "codebuild-description.json"
EksATestCodebuildProject = "aws-eks-anywhere-test"
E2eIndividualTestLogGroup = "/eks-anywhere/test/e2e"
E2eArtifactsBucketEnvVar = "E2E_ARTIFACTS_BUCKET"
FailedMessage = "An e2e instance run has failed"
SuccessMessage = "Ec2 instance tests completed successfully"
FailedTestsFile = "failed-tests.txt"
LogOutputFile = "codebuild-log.txt"
CiProxyLogGroup = "nginx-vcenter-proxy.log"
CiProxyLogStream = "nginx-vcenter-proxy.log"
)
| 16 |
eks-anywhere | aws | Go | package fileutils
import (
"strings"
"time"
)
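// GenOutputDirName returns a timestamped directory name such as
// "2023-01-02T15_04_05-07_00-artifacts" (illustrative).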
func GenOutputDirName(suffix string) string {
now := time.Now().Format(time.RFC3339)
// Replace : characters with _ for easier double-click selection in a
// terminal.
prefix := strings.ReplaceAll(now, ":", "_")
return prefix + "-" + suffix
}
| 15 |
eks-anywhere | aws | Go | package filewriter
import "os"
type FileWriter interface {
Write(fileName string, content []byte, f ...FileOptionsFunc) (path string, err error)
WithDir(dir string) (FileWriter, error)
WriteTestArtifactsS3ToFile(key string, data []byte) error
CleanUp()
CleanUpTemp()
Dir() string
}
type FileOptions struct {
IsTemp bool
Permissions os.FileMode
}
type FileOptionsFunc func(op *FileOptions)
| 20 |
eks-anywhere | aws | Go | package filewriter
import (
"os"
)
const DefaultTmpFolder = "generated"
func defaultFileOptions() *FileOptions {
return &FileOptions{true, os.ModePerm}
}
func Permission0600(op *FileOptions) {
op.Permissions = 0o600
}
func PersistentFile(op *FileOptions) {
op.IsTemp = false
}
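// Example (illustrative): write a persistent file with restricted permissions:
//
//	path, err := writer.Write("cluster.kubeconfig", content, PersistentFile, Permission0600)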
| 20 |
eks-anywhere | aws | Go | package filewriter
import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/aws/eks-anywhere/pkg/logger"
)
type writer struct {
dir string
}
func NewWriter(dir string) FileWriter {
return &writer{dir: dir}
}
func (t *writer) Write(fileName string, content []byte, f ...FileOptionsFunc) (string, error) {
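// Composite test names can arrive as "TestA|TestB|TestC"; collapse them into a
// single file-system-friendly name such as "TestA+2Tests".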
if strings.Contains(fileName, "|") {
count := strings.Count(fileName, "|") - 1
fileName = fmt.Sprintf("%s+%dTests", fileName[:strings.Index(fileName, "|")], count)
}
newFolder := filepath.Join(t.dir, DefaultTmpFolder)
if _, err := os.Stat(newFolder); errors.Is(err, os.ErrNotExist) {
err := os.MkdirAll(newFolder, os.ModePerm)
if err != nil {
return "", err
}
}
op := defaultFileOptions() // default: temporary file with default permissions
for _, optionFunc := range f {
optionFunc(op)
}
var currentDir string
if op.IsTemp {
currentDir = filepath.Join(t.dir, DefaultTmpFolder)
} else {
currentDir = t.dir
}
filePath := filepath.Join(currentDir, fileName)
err := os.WriteFile(filePath, content, op.Permissions)
if err != nil {
return "", fmt.Errorf("writing to file [%s]: %v", filePath, err)
}
return filePath, nil
}
func (w *writer) WithDir(dir string) (FileWriter, error) {
return NewWriter(filepath.Join(w.dir, dir)), nil
}
func (t *writer) Dir() string {
return t.dir
}
// WriteTestArtifactsS3ToFile writes e2e test artifacts fetched from S3 to files in a directory named after the e2e test.
func (t *writer) WriteTestArtifactsS3ToFile(key string, data []byte) error {
i := strings.LastIndex(key, "/Test")
if i == -1 {
logger.Info("Failed writing object to file", "key", key)
return nil
}
p := path.Join(t.dir, key[i:])
err := os.MkdirAll(path.Dir(p), os.ModePerm)
if err != nil {
return err
}
err = os.WriteFile(p, data, os.ModePerm)
if err != nil {
return err
}
return nil
}
func (t *writer) CleanUp() {
_, err := os.Stat(t.dir)
if err == nil {
os.RemoveAll(t.dir)
}
}
func (t *writer) CleanUpTemp() {
currentDir := filepath.Join(t.dir, DefaultTmpFolder)
_, err := os.Stat(currentDir)
if err == nil {
os.RemoveAll(currentDir)
}
}
| 98 |
eks-anywhere | aws | Go | package logfetcher
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
awscodebuild "github.com/aws/aws-sdk-go/service/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/cloudwatch"
"github.com/aws/eks-anywhere-test-tool/pkg/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere-test-tool/pkg/fileutils"
"github.com/aws/eks-anywhere-test-tool/pkg/testresults"
"github.com/aws/eks-anywhere/pkg/logger"
)
type FetchLogsOpt func(options *fetchLogsConfig) (err error)
func WithCodebuildBuild(buildId string) FetchLogsOpt {
return func(options *fetchLogsConfig) (err error) {
options.buildId = buildId
logger.Info("user provided build ID detected", "buildId", buildId)
return err
}
}
func WithCodebuildProject(project string) FetchLogsOpt {
return func(options *fetchLogsConfig) (err error) {
options.project = project
logger.Info("user provided project ID detected", "project", project)
return err
}
}
type fetchLogsConfig struct {
buildId string
project string
}
var ssmCommandExecutionLogStreamTemplate = "%s/%s/aws-runShellScript/%s"
type (
codebuildConsumer func(*awscodebuild.Build) error
messagesConsumer func(allMessages, filteredMessages *bytes.Buffer) error
testConsumer func(testName string, logs []*cloudwatchlogs.OutputLogEvent) error
)
type LogFetcherOpt func(*testLogFetcher)
func WithTestFilterByName(tests []string) LogFetcherOpt {
return func(l *testLogFetcher) {
l.filterTests = testresults.NewTestFilterByName(tests)
}
}
func WithLogStdout() LogFetcherOpt {
return func(l *testLogFetcher) {
l.processCodebuild = func(*awscodebuild.Build) error { return nil }
l.processMessages = func(allMessages, filteredMessages *bytes.Buffer) error { return nil }
l.processTest = logTest
}
}
type testLogFetcher struct {
buildAccountCwClient *cloudwatch.Cloudwatch
testAccountCwClient *cloudwatch.Cloudwatch
buildAccountCodebuildClient *codebuild.Codebuild
writer *testsWriter
filterTests testresults.TestFilter
processCodebuild codebuildConsumer
processMessages messagesConsumer
processTest testConsumer
}
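// New builds a testLogFetcher. By default it filters for failed tests and
// writes the codebuild description, aggregated messages and per-test logs to a
// timestamped folder; options can filter tests by name or print test logs to
// stdout instead.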
func New(buildAccountCwClient *cloudwatch.Cloudwatch, testAccountCwClient *cloudwatch.Cloudwatch, buildAccountCodebuildClient *codebuild.Codebuild, opts ...LogFetcherOpt) *testLogFetcher {
l := &testLogFetcher{
buildAccountCwClient: buildAccountCwClient,
testAccountCwClient: testAccountCwClient,
buildAccountCodebuildClient: buildAccountCodebuildClient,
}
for _, o := range opts {
o(l)
}
defaultOutputFolder := fileutils.GenOutputDirName("logs")
if l.filterTests == nil {
l.filterTests = testresults.GetFailedTests
}
if l.processCodebuild == nil {
_ = l.ensureWriter(defaultOutputFolder)
l.processCodebuild = l.writer.writeCodeBuild
}
if l.processMessages == nil {
_ = l.ensureWriter(defaultOutputFolder)
l.processMessages = l.writer.writeMessages
}
if l.processTest == nil {
_ = l.ensureWriter(defaultOutputFolder)
l.processTest = l.writer.writeTest
}
return l
}
func (l *testLogFetcher) FetchLogs(opts ...FetchLogsOpt) error {
config := &fetchLogsConfig{
project: constants.EksATestCodebuildProject,
}
for _, opt := range opts {
err := opt(config)
if err != nil {
return fmt.Errorf("failed to set options on fetch logs config: %v", err)
}
}
if config.buildId == "" {
p, err := l.buildAccountCodebuildClient.FetchLatestBuildForProject(config.project)
if err != nil {
return fmt.Errorf("failed to get latest build for project: %v", err)
}
config.buildId = *p.Id
logger.Info("Using latest build for selected project", "buildID", config.buildId, "project", config.project)
}
failedTests, err := l.GetBuildProjectLogs(config.project, config.buildId)
if err != nil {
return err
}
return l.FetchTestLogs(failedTests)
}
func (l *testLogFetcher) GetBuildProjectLogs(project string, buildId string) ([]testresults.TestResult, error) {
logger.Info("Fetching build project logs...")
build, err := l.buildAccountCodebuildClient.FetchBuildForProject(buildId)
if err != nil {
return nil, fmt.Errorf("fetching build project logs for project %s: %v", project, err)
}
g := build.Logs.GroupName
s := build.Logs.StreamName
logs, err := l.buildAccountCwClient.GetLogs(*g, *s)
if err != nil {
return nil, fmt.Errorf("fetching cloudwatch logs: %v", err)
}
allMsg := allMessages(logs)
filteredMsg, filteredTests, err := l.filterTests(logs)
if err != nil {
return nil, err
}
if err = l.processCodebuild(build); err != nil {
return nil, err
}
if err = l.processMessages(allMsg, filteredMsg); err != nil {
return nil, err
}
return filteredTests, nil
}
func (l *testLogFetcher) FetchTestLogs(tests []testresults.TestResult) error {
logger.Info("Fetching individual test logs...")
for _, test := range tests {
stdout := fmt.Sprintf(ssmCommandExecutionLogStreamTemplate, test.CommandId, test.InstanceId, "stdout")
logs, err := l.testAccountCwClient.GetLogs(constants.E2eIndividualTestLogGroup, stdout)
if err != nil {
logger.Info("error when fetching cloudwatch logs", "error", err)
return err
}
if err := l.processTest(test.Tests, logs); err != nil {
return err
}
}
return nil
}
func (l *testLogFetcher) ensureWriter(folderPath string) error {
if l.writer != nil {
return nil
}
var err error
l.writer, err = newTestsWriter(folderPath)
if err != nil {
return err
}
return nil
}
func allMessages(logs []*cloudwatchlogs.OutputLogEvent) *bytes.Buffer {
allMsg := new(bytes.Buffer)
for _, event := range logs {
allMsg.WriteString(*event.Message)
}
return allMsg
}
| 214 |
eks-anywhere | aws | Go | package logfetcher
import (
"fmt"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
Reset = "\033[0m"
Black = "\033[30m"
Red = "\033[31m"
Green = "\033[32m"
Yellow = "\033[33m"
Blue = "\033[34m"
Purple = "\033[35m"
Cyan = "\033[36m"
Grey = "\033[37m"
White = "\033[97m"
)
type colorer func(string) string
var colorsForRegexp = []struct {
regex *regexp.Regexp
colorer colorer
}{
{
// not very relevant CLI logs
regex: regexp.MustCompile(`^.*\s*V(4|5|6|7|8|9)\s{1}[^e]`),
colorer: black,
},
{
// e2e test logs
regex: regexp.MustCompile(`^.*\s*V\d\s*e2e`),
colorer: blue,
},
{
// Go test logs
regex: regexp.MustCompile(`^.*\.go:\d*:`),
colorer: blue,
},
{
// Go test start
regex: regexp.MustCompile("^=== RUN "),
colorer: green,
},
{
// CLI warning
regex: regexp.MustCompile(`^.*\s*V\d\s*Warning:`),
colorer: yellow,
},
{
// CLI error
regex: regexp.MustCompile("^Error:"),
colorer: red,
},
{
// Go test failure
regex: regexp.MustCompile("^--- FAIL:|^FAIL"),
colorer: red,
},
}
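// logTest prints each log line to stdout, colored according to the first
// matching pattern above (first match wins).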
func logTest(testName string, logs []*cloudwatchlogs.OutputLogEvent) error {
logger.Info("Test logs", "testName", testName)
for _, e := range logs {
m := *e.Message
for _, line := range strings.Split(m, "\n") {
for _, rc := range colorsForRegexp {
if rc.regex.MatchString(line) {
line = rc.colorer(line)
break
}
}
fmt.Println(line)
}
}
return nil
}
func color(m, c string) string {
return c + m + Reset
}
func blue(m string) string {
return color(m, Blue)
}
func red(m string) string {
return color(m, Red)
}
func green(m string) string {
return color(m, Green)
}
func yellow(m string) string {
return color(m, Yellow)
}
func black(m string) string {
return color(m, Black)
}
| 111 |
eks-anywhere | aws | Go | package logfetcher
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
awscodebuild "github.com/aws/aws-sdk-go/service/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere-test-tool/pkg/filewriter"
)
type testsWriter struct {
filewriter.FileWriter
}
func newTestsWriter(folderPath string) (*testsWriter, error) {
writer := filewriter.NewWriter(folderPath)
return &testsWriter{FileWriter: writer}, nil
}
func (w *testsWriter) writeCodeBuild(build *awscodebuild.Build) error {
if _, err := w.Write(constants.BuildDescriptionFile, []byte(build.String()), filewriter.PersistentFile); err != nil {
return fmt.Errorf("writing build description: %v", err)
}
return nil
}
func (w *testsWriter) writeMessages(allMessages, filteredMessages *bytes.Buffer) error {
if _, err := w.Write(constants.FailedTestsFile, filteredMessages.Bytes(), filewriter.PersistentFile); err != nil {
return err
}
if _, err := w.Write(constants.LogOutputFile, allMessages.Bytes(), filewriter.PersistentFile); err != nil {
return err
}
return nil
}
func (w *testsWriter) writeTest(testName string, logs []*cloudwatchlogs.OutputLogEvent) error {
buf := new(bytes.Buffer)
for _, log := range logs {
buf.WriteString(*log.Message)
}
if _, err := w.Write(testName, buf.Bytes(), filewriter.PersistentFile); err != nil {
return err
}
return nil
}
| 55 |
eks-anywhere | aws | Go | package providerproxy
import (
"fmt"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/eks-anywhere-test-tool/pkg/cloudwatch"
"github.com/aws/eks-anywhere-test-tool/pkg/codebuild"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere-test-tool/pkg/fileutils"
"github.com/aws/eks-anywhere/pkg/logger"
)
type FetchSessionOpts func(options *fetchSessionsConfig) (err error)
func WithCodebuildBuild(buildId string) FetchSessionOpts {
return func(options *fetchSessionsConfig) (err error) {
options.buildId = buildId
logger.Info("user provided build ID detected", "buildId", buildId)
return err
}
}
func WithCodebuildProject(project string) FetchSessionOpts {
return func(options *fetchSessionsConfig) (err error) {
options.project = project
logger.Info("user provided project ID detected", "project", project)
return err
}
}
type fetchSessionsConfig struct {
buildId string
project string
}
type (
requestFilter func(logs []*cloudwatchlogs.OutputLogEvent) (filteredLogs []*cloudwatchlogs.OutputLogEvent, err error)
requestConsumer func(logs []*cloudwatchlogs.OutputLogEvent) error
)
type ProxyFetcherOpt func(*proxyLogFetcher)
func WithLogStdout() ProxyFetcherOpt {
return func(l *proxyLogFetcher) {
l.processRequests = func(logs []*cloudwatchlogs.OutputLogEvent) error { return nil }
}
}
type proxyLogFetcher struct {
buildAccountCwClient *cloudwatch.Cloudwatch
testAccountCwClient *cloudwatch.Cloudwatch
buildAccountCodebuildClient *codebuild.Codebuild
writer *requestWriter
filterRequests requestFilter
processRequests requestConsumer
}
func New(buildAccountCwClient *cloudwatch.Cloudwatch, testAccountCwClient *cloudwatch.Cloudwatch, buildAccountCodebuildClient *codebuild.Codebuild, opts ...ProxyFetcherOpt) *proxyLogFetcher {
l := &proxyLogFetcher{
buildAccountCwClient: buildAccountCwClient,
testAccountCwClient: testAccountCwClient,
buildAccountCodebuildClient: buildAccountCodebuildClient,
}
for _, o := range opts {
o(l)
}
defaultOutputFolder := fileutils.GenOutputDirName("provider-proxy-logs")
if l.filterRequests == nil {
l.filterRequests = noFilter
}
if l.processRequests == nil {
_ = l.ensureWriter(defaultOutputFolder)
l.processRequests = l.writer.writeRequest
}
return l
}
func (l *proxyLogFetcher) FetchProviderProxyLogs(opts ...FetchSessionOpts) error {
config := &fetchSessionsConfig{
project: constants.EksATestCodebuildProject,
}
for _, opt := range opts {
err := opt(config)
if err != nil {
return fmt.Errorf("failed to set options on fetch logs config: %v", err)
}
}
if config.buildId == "" {
p, err := l.buildAccountCodebuildClient.FetchLatestBuildForProject(config.project)
if err != nil {
return fmt.Errorf("failed to get latest build for project: %v", err)
}
config.buildId = *p.Id
logger.Info("Using latest build for selected project", "buildID", config.buildId, "project", config.project)
}
logs, err := l.FetchProviderProxyLogsForBuild(config.project, config.buildId)
if err != nil {
return err
}
return l.processRequests(logs)
}
func (l *proxyLogFetcher) FetchProviderProxyLogsForBuild(project string, buildId string) ([]*cloudwatchlogs.OutputLogEvent, error) {
logger.Info("Fetching provider proxy logs...")
build, err := l.buildAccountCodebuildClient.FetchBuildForProject(buildId)
if err != nil {
return nil, fmt.Errorf("fetching build for project %s: %v", project, err)
}
// convert to epoch milliseconds, which the CloudWatch GetLogEvents API expects
buildStart := build.StartTime.UnixNano() / 1e6
logger.Info("Starting log time", "Start time", buildStart)
buildEnd := build.EndTime.UnixNano() / 1e6
logger.Info("Ending log time", "End time", buildEnd)
logs, err := l.buildAccountCwClient.GetLogsInTimeframe(constants.CiProxyLogGroup, constants.CiProxyLogStream, buildStart, buildEnd)
if err != nil {
return nil, fmt.Errorf("fetching cloudwatch logs: %v", err)
}
filteredLogs, err := l.filterRequests(logs)
return filteredLogs, err
}
func (l *proxyLogFetcher) ensureWriter(folderPath string) error {
if l.writer != nil {
return nil
}
var err error
l.writer, err = newRequestWriter(folderPath)
if err != nil {
return err
}
return nil
}
func noFilter(logs []*cloudwatchlogs.OutputLogEvent) (outputLogs []*cloudwatchlogs.OutputLogEvent, err error) {
return logs, nil
}
| 154 |
eks-anywhere | aws | Go | package providerproxy
import (
"bytes"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/eks-anywhere-test-tool/pkg/filewriter"
)
type requestWriter struct {
filewriter.FileWriter
}
func newRequestWriter(folderPath string) (*requestWriter, error) {
writer := filewriter.NewWriter(folderPath)
return &requestWriter{FileWriter: writer}, nil
}
func (w *requestWriter) writeRequest(logs []*cloudwatchlogs.OutputLogEvent) error {
buf := new(bytes.Buffer)
for _, log := range logs {
buf.WriteString(*log.Message + "\n")
}
if _, err := w.Write("requests", buf.Bytes(), filewriter.PersistentFile); err != nil {
return err
}
return nil
}
| 32 |
eks-anywhere | aws | Go | package providerproxy
import (
"strings"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)
func VsphereSessionsFilter(logs []*cloudwatchlogs.OutputLogEvent) (outputLogs []*cloudwatchlogs.OutputLogEvent, err error) {
vsphereSoapSessionType := "SessionManager"
var sessionLogs []*cloudwatchlogs.OutputLogEvent
for _, log := range logs {
if strings.Contains(*log.Message, vsphereSoapSessionType) {
sessionLogs = append(sessionLogs, log)
}
}
return sessionLogs, nil
}
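// A sketch of wiring this filter into the fetcher (hypothetical option; the
// fetcher currently defaults to noFilter):
//
//	func WithVsphereSessionFilter() ProxyFetcherOpt {
//		return func(l *proxyLogFetcher) {
//			l.filterRequests = VsphereSessionsFilter
//		}
//	}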
| 19 |
eks-anywhere | aws | Go | package s3
import (
"bytes"
"fmt"
"math/rand"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/eks-anywhere-test-tool/pkg/awsprofiles"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type S3 struct {
session *session.Session
svc *s3.S3
retrier *retrier.Retrier
}
func New(account awsprofiles.EksAccount) (*S3, error) {
logger.V(2).Info("creating S3 client")
sess, err := session.NewSessionWithOptions(session.Options{
Profile: account.ProfileName(),
Config: aws.Config{
Region: aws.String(constants.AwsAccountRegion),
CredentialsChainVerboseErrors: aws.Bool(true),
},
})
if err != nil {
return nil, fmt.Errorf("setting up session: %v", err)
}
svc := s3.New(sess)
logger.V(2).Info("created S3 client")
return &S3{
session: sess,
svc: svc,
retrier: getObjectRetrier(),
}, nil
}
func (s *S3) ListObjects(bucket string, prefix string) ([]*s3.Object, error) {
var nextToken *string
var objects []*s3.Object
input := &s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(prefix),
}
for {
l, err := s.svc.ListObjectsV2(input)
if err != nil {
return nil, fmt.Errorf("failed to list objects: %v", err)
}
objects = append(objects, l.Contents...)
if !aws.BoolValue(l.IsTruncated) {
logger.Info("finished fetching objects", "bucket", bucket, "prefix", prefix)
logger.V(3).Info("token comparison", "nextToken", nextToken, "nextContinuationToken", l.NextContinuationToken)
break
}
nextToken = l.NextContinuationToken
input.ContinuationToken = nextToken
logger.Info("fetched objects", "bucket", bucket, "prefix", prefix, "events", len(l.Contents))
logger.V(3).Info("token comparison", "nextToken", nextToken, "nextContinuationToken", l.NextContinuationToken)
}
return objects, nil
}
func (s *S3) GetObject(bucket string, key string) ([]byte, error) {
input := &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
}
var obj *s3.GetObjectOutput
var err error
err = s.retrier.Retry(func() error {
obj, err = s.svc.GetObject(input)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to get object at key %s: %v", key, err)
}
buf := new(bytes.Buffer)
_, err = buf.ReadFrom(obj.Body)
if err != nil {
return nil, fmt.Errorf("failed to read object at key %s: %v", key, err)
}
return buf.Bytes(), nil
}
func getObjectRetrier() *retrier.Retrier {
return retrier.New(time.Minute, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
generator := rand.New(rand.NewSource(time.Now().UnixNano()))
minWait := 1
maxWait := 5
// wait a random 1-5 seconds (inclusive) to add jitter between retries
waitWithJitter := time.Duration(generator.Intn(maxWait-minWait+1)+minWait) * time.Second
if isThrottledError(err) && totalRetries < 15 {
logger.V(2).Info("Throttled by S3, retrying")
return true, waitWithJitter
}
return false, 0
}))
}
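// isThrottledError treats DNS resolution failures ("no such host") as
// throttling; these tend to surface locally when many parallel S3 requests
// exhaust the resolver.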
func isThrottledError(err error) bool {
return strings.Contains(err.Error(), "no such host")
}
| 119 |
eks-anywhere | aws | Go | package testresults
import (
"bytes"
"encoding/json"
"strings"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/eks-anywhere-test-tool/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
)
type TestFilter func(logs []*cloudwatchlogs.OutputLogEvent) (filteredTestsLogs *bytes.Buffer, filteredTestResults []TestResult, err error)
type TestResult struct {
InstanceId string `json:"instanceId"`
JobId string `json:"jobId"`
CommandId string `json:"commandId"`
Tests string `json:"tests"`
Status string `json:"status"`
Error string `json:"error"`
}
func GetFailedTests(logs []*cloudwatchlogs.OutputLogEvent) (failedTestMessages *bytes.Buffer, failedTestResults []TestResult, err error) {
var failedTests []TestResult
failedTestMessages = &bytes.Buffer{}
for _, event := range logs {
if strings.Contains(*event.Message, constants.FailedMessage) {
msg := *event.Message
i := strings.Index(msg, "{")
subMsg := msg[i:]
var r TestResult
err = json.Unmarshal([]byte(subMsg), &r)
if err != nil {
logger.Info("error when unmarshalling json of test results", "error", err)
return nil, nil, err
}
failedTests = append(failedTests, r)
failedTestMessages.WriteString(subMsg)
}
}
return failedTestMessages, failedTests, nil
}
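// NewTestFilterByName returns a TestFilter that keeps only result messages
// whose test name appears in the given list, regardless of pass/fail status.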
func NewTestFilterByName(tests []string) TestFilter {
lookup := types.SliceToLookup(tests)
return func(logs []*cloudwatchlogs.OutputLogEvent) (filteredTestsLogs *bytes.Buffer, filteredTestResults []TestResult, err error) {
filteredTestsLogs = &bytes.Buffer{}
for _, event := range logs {
if !isResultMessage(*event.Message) {
continue
}
msg := *event.Message
i := strings.Index(msg, "{")
subMsg := msg[i:]
var r TestResult
err = json.Unmarshal([]byte(subMsg), &r)
if err != nil {
logger.Info("error when unmarshalling json of test results", "error", err)
return nil, nil, err
}
if !lookup.IsPresent(r.Tests) {
continue
}
filteredTestResults = append(filteredTestResults, r)
filteredTestsLogs.WriteString(subMsg)
}
return filteredTestsLogs, filteredTestResults, nil
}
}
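// TestResultsJobIdMap indexes test results by job ID for quick membership checks.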
func TestResultsJobIdMap(tests []TestResult) map[string]bool {
m := make(map[string]bool, len(tests))
for _, test := range tests {
m[test.JobId] = true
}
return m
}
func isResultMessage(message string) bool {
return strings.Contains(message, constants.FailedMessage) || strings.Contains(message, constants.SuccessMessage)
}
| 91 |
eks-anywhere | aws | Go | package framework
import (
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/cluster"
)
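// incompatiblePathsForVersion lists cluster spec paths that older CLI versions
// do not understand and that must be stripped before handing them a config.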
var incompatiblePathsForVersion = map[string][]string{
"v0.6.1": {
"spec.clusterNetwork.dns",
"spec.workerNodeGroupConfigurations[].name",
},
}
func cleanUpClusterForVersion(config *cluster.Config, version string) error {
return api.CleanupPathsInObject(config.Cluster, incompatiblePathsForVersion[version])
}
| 18 |
eks-anywhere | aws | Go | package framework
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/pkg/awsiam"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/version"
)
const (
AWSIamRoleArn = "T_AWS_IAM_ROLE_ARN"
)
var awsIamRequiredEnvVars = []string{
AWSIamRoleArn,
}
func RequiredAWSIamEnvVars() []string {
return awsIamRequiredEnvVars
}
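// WithAWSIam configures the test cluster with an AWSIamConfig and the matching
// identity provider ref, mapping the role from T_AWS_IAM_ROLE_ARN to
// kubernetes-admin in system:masters.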
func WithAWSIam() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
checkRequiredEnvVars(e.T, awsIamRequiredEnvVars)
if e.ClusterConfig.AWSIAMConfigs == nil {
e.ClusterConfig.AWSIAMConfigs = make(map[string]*anywherev1.AWSIamConfig, 1)
}
e.ClusterConfig.AWSIAMConfigs[defaultClusterName] = api.NewAWSIamConfig(defaultClusterName,
api.WithAWSIamAWSRegion("us-west-1"),
api.WithAWSIamPartition("aws"),
api.WithAWSIamBackendMode("EKSConfigMap"),
api.WithAWSIamMapRoles(api.AddAWSIamRole(withArnFromEnv(AWSIamRoleArn), "kubernetes-admin", []string{"system:masters"})),
)
e.clusterFillers = append(e.clusterFillers,
api.WithAWSIamIdentityProviderRef(defaultClusterName),
)
}
}
func withArnFromEnv(envVar string) string {
return os.Getenv(envVar)
}
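// ValidateAWSIamAuth downloads the aws-iam-authenticator client, waits for the
// authenticator daemonset to roll out and verifies that the generated IAM
// kubeconfig can list pods.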
func (e *ClusterE2ETest) ValidateAWSIamAuth() {
ctx := context.Background()
e.T.Log("Downloading aws-iam-authenticator client")
err := e.downloadAwsIamAuthClient()
if err != nil {
e.T.Fatalf("Error downloading aws-iam-authenticator client: %v", err)
}
e.T.Log("Setting aws-iam-authenticator client in env PATH")
err = e.setIamAuthClientPATH()
if err != nil {
e.T.Fatalf("Error updating PATH: %v", err)
}
kubectlClient := buildLocalKubectl()
e.T.Log("Waiting for aws-iam-authenticator daemonset rollout status")
err = kubectlClient.WaitForResourceRolledout(ctx,
e.Cluster(),
"2m",
"aws-iam-authenticator",
constants.KubeSystemNamespace,
"daemonset",
)
if err != nil {
e.T.Fatalf("Error waiting aws-iam-authenticator daemonset rollout: %v", err)
}
e.T.Log("Getting pods with aws-iam-authenticator kubeconfig")
pods, err := kubectlClient.GetPods(ctx,
executables.WithAllNamespaces(),
executables.WithKubeconfig(e.iamAuthKubeconfigFilePath()),
)
if err != nil {
e.T.Fatalf("Error getting pods: %v", err)
}
if len(pods) > 0 {
e.T.Log("Successfully got pods with aws-iam-authenticator authentication")
}
}
func (e *ClusterE2ETest) downloadAwsIamAuthClient() error {
eksdRelease, err := e.getEksdReleaseManifest()
if err != nil {
return err
}
err = awsiam.DownloadAwsIamAuthClient(eksdRelease)
if err != nil {
return err
}
return nil
}
func (e *ClusterE2ETest) setIamAuthClientPATH() error {
envPath := os.Getenv("PATH")
workDir, err := os.Getwd()
if err != nil {
return fmt.Errorf("finding current working directory: %v", err)
}
iamAuthClientPath := fmt.Sprintf("%s/bin", workDir)
if strings.Contains(envPath, iamAuthClientPath) {
return nil
}
err = os.Setenv("PATH", fmt.Sprintf("%s:%s", iamAuthClientPath, envPath))
if err != nil {
return fmt.Errorf("setting %s to PATH: %v", iamAuthClientPath, err)
}
return nil
}
func (e *ClusterE2ETest) getEksdReleaseManifest() (*eksdv1alpha1.Release, error) {
c := e.ClusterConfig.Cluster
r := manifests.NewReader(newFileReader())
eksdRelease, err := r.ReadEKSD(version.Get().GitVersion, string(c.Spec.KubernetesVersion))
if err != nil {
return nil, fmt.Errorf("getting EKS-D release spec from bundle: %v", err)
}
return eksdRelease, nil
}
func (e *ClusterE2ETest) iamAuthKubeconfigFilePath() string {
return filepath.Join(e.ClusterName, fmt.Sprintf("%s-aws.kubeconfig", e.ClusterName))
}
// WithAwsIamEnvVarCheck returns a ClusterE2ETestOpt that checks for the required env vars.
func WithAwsIamEnvVarCheck() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
checkRequiredEnvVars(e.T, awsIamRequiredEnvVars)
}
}
// WithAwsIamConfig sets aws iam in cluster config.
func WithAwsIamConfig() api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(func(config *cluster.Config) {
config.AWSIAMConfigs[defaultClusterName] = api.NewAWSIamConfig(defaultClusterName,
api.WithAWSIamAWSRegion("us-west-1"),
api.WithAWSIamPartition("aws"),
api.WithAWSIamBackendMode("EKSConfigMap"),
api.WithAWSIamMapRoles(api.AddAWSIamRole(withArnFromEnv(AWSIamRoleArn), "kubernetes-admin", []string{"system:masters"})),
)
}, api.ClusterToConfigFiller(api.WithAWSIamIdentityProviderRef(defaultClusterName)))
}
| 154 |
eks-anywhere | aws | Go | package framework
import (
"context"
"errors"
"os"
"os/exec"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
// WithSkipCiliumUpgrade returns an E2E test option that configures the Cluster object to
// skip Cilium upgrades.
func WithSkipCiliumUpgrade() ClusterE2ETestOpt {
return WithClusterFiller(func(cluster *v1alpha1.Cluster) {
cluster.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(true)
})
}
// UninstallCilium uninstalls the workload clusters Cilium.
func (e *ClusterE2ETest) UninstallCilium() {
e.ValidateCiliumCLIAvailable()
cmd := exec.Command("cilium", "uninstall")
cmd.Env = append(os.Environ(), "KUBECONFIG="+e.KubeconfigFilePath())
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
e.T.Log("Uninstalling Cilium using Cilium CLI")
if err := cmd.Run(); err != nil {
e.T.Fatal(err)
}
}
// ValidateCiliumCLIAvailable ensures the Cilium CLI can be found on the PATH.
func (e *ClusterE2ETest) ValidateCiliumCLIAvailable() {
if _, err := exec.LookPath("cilium"); err != nil {
e.T.Fatal("Cilium CLI is required to run these tests (https://github.com/cilium/cilium-cli).")
}
}
// InstallOSSCilium installs an open source version of Cilium. The version is dependent on the
// Cilium CLI version available on the PATH.
func (e *ClusterE2ETest) InstallOSSCilium() {
e.ValidateCiliumCLIAvailable()
cmd := exec.Command("cilium", "install")
cmd.Env = append(os.Environ(), "KUBECONFIG="+e.KubeconfigFilePath())
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
e.T.Log("Installing OSS Cilium using Cilium CLI")
if err := cmd.Run(); err != nil {
e.T.Fatal(err)
}
}
// ReplaceCiliumWithOSSCilium replaces the current Cilium installation in the workload cluster
// with an open source version. See InstallOSSCilium().
func (e *ClusterE2ETest) ReplaceCiliumWithOSSCilium() {
e.UninstallCilium()
e.InstallOSSCilium()
}
// ValidateEKSACiliumNotInstalled inspects the workload cluster for an EKSA Cilium installation
// erroring if one is found.
func (e *ClusterE2ETest) ValidateEKSACiliumNotInstalled() {
client, err := buildClusterClient(e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("Constructing client: %v", err)
}
install, err := cilium.GetInstallation(context.Background(), client)
if err != nil {
e.T.Fatalf("Getting Cilium installation: %v", err)
}
if install.Installed() {
e.T.Fatal("Unexpected Cilium install found in the workload cluster")
}
}
// ValidateEKSACiliumInstalled inspects the workload cluster for an EKSA Cilium installation
// erroring if one is not found.
func (e *ClusterE2ETest) ValidateEKSACiliumInstalled() {
e.T.Logf("Checking for EKSA Cilium installation with %v", e.KubeconfigFilePath())
client, err := buildClusterClient(e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("Constructing client: %v", err)
}
install, err := cilium.GetInstallation(context.Background(), client)
if err != nil {
e.T.Fatalf("Getting Cilium installation: %v", err)
}
if !install.Installed() {
e.T.Fatal("Expected EKSA Cilium to be installed but found nothing")
}
}
// AwaitCiliumDaemonSetReady awaits the Cilium daemonset to be ready in the cluster represented by client.
// It is ready when the DaemonSet's .Status.NumberUnavailable is 0.
func AwaitCiliumDaemonSetReady(ctx context.Context, client client.Client, retries int, timeout time.Duration) error {
return retrier.Retry(retries, timeout, func() error {
installation, err := cilium.GetInstallation(ctx, client)
if err != nil {
return err
}
if installation.DaemonSet == nil {
return errors.New("cilium DaemonSet not found")
}
if installation.DaemonSet.Status.NumberUnavailable != 0 {
return errors.New("DaemonSet not ready")
}
return nil
})
}
| 128 |
eks-anywhere | aws | Go | package framework
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/retrier"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
"github.com/aws/eks-anywhere/test/framework/cluster/validations"
)
const (
cloudstackDomainVar = "T_CLOUDSTACK_DOMAIN"
cloudstackZoneVar = "T_CLOUDSTACK_ZONE"
cloudstackZone2Var = "T_CLOUDSTACK_ZONE_2"
cloudstackZone3Var = "T_CLOUDSTACK_ZONE_3"
cloudstackAccountVar = "T_CLOUDSTACK_ACCOUNT"
cloudstackNetworkVar = "T_CLOUDSTACK_NETWORK"
cloudstackNetwork2Var = "T_CLOUDSTACK_NETWORK_2"
cloudstackNetwork3Var = "T_CLOUDSTACK_NETWORK_3"
cloudstackCredentialsVar = "T_CLOUDSTACK_CREDENTIALS"
cloudstackCredentials2Var = "T_CLOUDSTACK_CREDENTIALS_2"
cloudstackCredentials3Var = "T_CLOUDSTACK_CREDENTIALS_3"
cloudstackManagementServerVar = "T_CLOUDSTACK_MANAGEMENT_SERVER"
cloudstackManagementServer2Var = "T_CLOUDSTACK_MANAGEMENT_SERVER_2"
cloudstackManagementServer3Var = "T_CLOUDSTACK_MANAGEMENT_SERVER_3"
cloudstackSshAuthorizedKeyVar = "T_CLOUDSTACK_SSH_AUTHORIZED_KEY"
cloudstackTemplateRedhat123Var = "T_CLOUDSTACK_TEMPLATE_REDHAT_1_23"
cloudstackTemplateRedhat124Var = "T_CLOUDSTACK_TEMPLATE_REDHAT_1_24"
cloudstackComputeOfferingLargeVar = "T_CLOUDSTACK_COMPUTE_OFFERING_LARGE"
cloudstackComputeOfferingLargerVar = "T_CLOUDSTACK_COMPUTE_OFFERING_LARGER"
cloudStackClusterIPPoolEnvVar = "T_CLOUDSTACK_CLUSTER_IP_POOL"
cloudStackCidrVar = "T_CLOUDSTACK_CIDR"
podCidrVar = "T_CLOUDSTACK_POD_CIDR"
serviceCidrVar = "T_CLOUDSTACK_SERVICE_CIDR"
cloudstackFeatureGateEnvVar = "CLOUDSTACK_PROVIDER"
cloudstackB64EncodedSecretEnvVar = "EKSA_CLOUDSTACK_B64ENCODED_SECRET"
)
var requiredCloudStackEnvVars = []string{
cloudstackAccountVar,
cloudstackDomainVar,
cloudstackZoneVar,
cloudstackZone2Var,
cloudstackZone3Var,
cloudstackCredentialsVar,
cloudstackCredentials2Var,
cloudstackCredentials3Var,
cloudstackNetworkVar,
cloudstackNetwork2Var,
cloudstackNetwork3Var,
cloudstackManagementServerVar,
cloudstackManagementServer2Var,
cloudstackManagementServer3Var,
cloudstackSshAuthorizedKeyVar,
cloudstackTemplateRedhat123Var,
cloudstackTemplateRedhat124Var,
cloudstackComputeOfferingLargeVar,
cloudstackComputeOfferingLargerVar,
cloudStackCidrVar,
podCidrVar,
serviceCidrVar,
cloudstackFeatureGateEnvVar,
cloudstackB64EncodedSecretEnvVar,
}
type CloudStack struct {
t *testing.T
fillers []api.CloudStackFiller
clusterFillers []api.ClusterFiller
cidr string
podCidr string
serviceCidr string
cmkClient *executables.Cmk
devRelease *releasev1.EksARelease
templatesRegistry *templateRegistry
}
type CloudStackOpt func(*CloudStack)
// UpdateRedhatTemplate123Var updates the CloudStackTemplate for all machines to the one corresponding to K8s 1.23.
func UpdateRedhatTemplate123Var() api.CloudStackFiller {
return api.WithCloudStackStringFromEnvVar(cloudstackTemplateRedhat123Var, api.WithCloudStackTemplateForAllMachines)
}
// UpdateRedhatTemplate124Var updates the CloudStackTemplate for all machines to the one corresponding to K8s 1.24.
func UpdateRedhatTemplate124Var() api.CloudStackFiller {
return api.WithCloudStackStringFromEnvVar(cloudstackTemplateRedhat124Var, api.WithCloudStackTemplateForAllMachines)
}
func UpdateLargerCloudStackComputeOffering() api.CloudStackFiller {
return api.WithCloudStackStringFromEnvVar(cloudstackComputeOfferingLargerVar, api.WithCloudStackComputeOfferingForAllMachines)
}
// UpdateAddCloudStackAz3 adds availability zone 3 to the cluster spec.
func UpdateAddCloudStackAz3() api.CloudStackFiller {
return api.WithCloudStackAzFromEnvVars(cloudstackAccountVar, cloudstackDomainVar, cloudstackZone3Var, cloudstackCredentials3Var, cloudstackNetwork3Var,
cloudstackManagementServer3Var, api.WithCloudStackAz)
}
func UpdateAddCloudStackAz2() api.CloudStackFiller {
return api.WithCloudStackAzFromEnvVars(cloudstackAccountVar, cloudstackDomainVar, cloudstackZone2Var, cloudstackCredentials2Var, cloudstackNetwork2Var,
cloudstackManagementServer2Var, api.WithCloudStackAz)
}
func UpdateAddCloudStackAz1() api.CloudStackFiller {
return api.WithCloudStackAzFromEnvVars(cloudstackAccountVar, cloudstackDomainVar, cloudstackZoneVar, cloudstackCredentialsVar, cloudstackNetworkVar,
cloudstackManagementServerVar, api.WithCloudStackAz)
}
func RemoveAllCloudStackAzs() api.CloudStackFiller {
return api.RemoveCloudStackAzs()
}
// CloudStackCredentialsAz1 returns the value of the environment variable for cloudstackCredentialsVar.
func CloudStackCredentialsAz1() string {
return os.Getenv(cloudstackCredentialsVar)
}
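// NewCloudStack builds a CloudStack provider for E2E tests, validating the
// required environment variables and seeding default availability zone, SSH
// key and compute offering fillers.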
func NewCloudStack(t *testing.T, opts ...CloudStackOpt) *CloudStack {
checkRequiredEnvVars(t, requiredCloudStackEnvVars)
cmk := buildCmk(t)
c := &CloudStack{
t: t,
cmkClient: cmk,
fillers: []api.CloudStackFiller{
api.RemoveCloudStackAzs(),
api.WithCloudStackAzFromEnvVars(cloudstackAccountVar, cloudstackDomainVar, cloudstackZoneVar, cloudstackCredentialsVar, cloudstackNetworkVar,
cloudstackManagementServerVar, api.WithCloudStackAz),
api.WithCloudStackStringFromEnvVar(cloudstackSshAuthorizedKeyVar, api.WithCloudStackSSHAuthorizedKey),
api.WithCloudStackStringFromEnvVar(cloudstackComputeOfferingLargeVar, api.WithCloudStackComputeOfferingForAllMachines),
},
}
c.cidr = os.Getenv(cloudStackCidrVar)
c.podCidr = os.Getenv(podCidrVar)
c.serviceCidr = os.Getenv(serviceCidrVar)
c.templatesRegistry = &templateRegistry{cache: map[string]string{}, generator: c}
for _, opt := range opts {
opt(c)
}
return c
}
func WithCloudStackWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, fillers ...api.CloudStackMachineConfigFiller) CloudStackOpt {
return func(c *CloudStack) {
c.fillers = append(c.fillers, cloudStackMachineConfig(name, fillers...))
c.clusterFillers = append(c.clusterFillers, buildCloudStackWorkerNodeGroupClusterFiller(name, workerNodeGroup))
}
}
// WithCloudStackRedhat123 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.23.
func WithCloudStackRedhat123() CloudStackOpt {
return func(c *CloudStack) {
c.fillers = append(c.fillers,
api.WithCloudStackTemplateForAllMachines(c.templateForDevRelease(anywherev1.RedHat, anywherev1.Kube123)),
)
}
}
// WithCloudStackRedhat124 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.24.
func WithCloudStackRedhat124() CloudStackOpt {
return func(c *CloudStack) {
c.fillers = append(c.fillers,
api.WithCloudStackTemplateForAllMachines(c.templateForDevRelease(anywherev1.RedHat, anywherev1.Kube124)),
)
}
}
func WithCloudStackFillers(fillers ...api.CloudStackFiller) CloudStackOpt {
return func(c *CloudStack) {
c.fillers = append(c.fillers, fillers...)
}
}
func (c *CloudStack) Name() string {
return "cloudstack"
}
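// Setup is a no-op for CloudStack.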
func (c *CloudStack) Setup() {}
// UpdateKubeConfig customizes generated kubeconfig for the provider.
func (c *CloudStack) UpdateKubeConfig(content *[]byte, clusterName string) error {
return nil
}
// ClusterConfigUpdates satisfies the test framework Provider.
func (c *CloudStack) ClusterConfigUpdates() []api.ClusterConfigFiller {
controlPlaneIP, err := c.getControlPlaneIP()
if err != nil {
c.t.Fatalf("failed to pop cluster ip from test environment: %v", err)
}
f := make([]api.ClusterFiller, 0, len(c.clusterFillers)+3)
f = append(f, c.clusterFillers...)
f = append(f,
api.WithPodCidr(os.Getenv(podCidrVar)),
api.WithServiceCidr(os.Getenv(serviceCidrVar)),
api.WithControlPlaneEndpointIP(controlPlaneIP))
return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.CloudStackToConfigFiller(c.fillers...)}
}
func (c *CloudStack) CleanupVMs(clusterName string) error {
return cleanup.CleanUpCloudstackTestResources(context.Background(), clusterName, false)
}
func (c *CloudStack) WithProviderUpgrade(fillers ...api.CloudStackFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(api.CloudStackToConfigFiller(fillers...))
}
}
func (c *CloudStack) WithProviderUpgradeGit(fillers ...api.CloudStackFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(api.CloudStackToConfigFiller(fillers...))
}
}
func (c *CloudStack) getControlPlaneIP() (string, error) {
value, ok := os.LookupEnv(cloudStackClusterIPPoolEnvVar)
var clusterIP string
var err error
if ok && value != "" {
clusterIP, err = PopIPFromEnv(cloudStackClusterIPPoolEnvVar)
if err != nil {
c.t.Fatalf("failed to pop cluster ip from test environment: %v", err)
}
} else {
clusterIP, err = GenerateUniqueIp(c.cidr)
if err != nil {
c.t.Fatalf("failed to generate ip for cloudstack %s: %v", c.cidr, err)
}
}
return clusterIP, nil
}
func RequiredCloudstackEnvVars() []string {
return requiredCloudStackEnvVars
}
func (c *CloudStack) WithNewCloudStackWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, fillers ...api.CloudStackMachineConfigFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(
api.CloudStackToConfigFiller(cloudStackMachineConfig(name, fillers...)),
api.ClusterToConfigFiller(buildCloudStackWorkerNodeGroupClusterFiller(name, workerNodeGroup)),
)
}
}
// WithNewWorkerNodeGroup returns an api.ClusterConfigFiller that adds a new workerNodeGroupConfiguration and
// a corresponding CloudStackMachineConfig to the cluster config.
func (c *CloudStack) WithNewWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.CloudStackToConfigFiller(cloudStackMachineConfig(name)),
api.ClusterToConfigFiller(buildCloudStackWorkerNodeGroupClusterFiller(name, workerNodeGroup)),
)
}
// WithWorkerNodeGroupConfiguration returns an api.ClusterConfigFiller that adds a new workerNodeGroupConfiguration item to the cluster config.
func (c *CloudStack) WithWorkerNodeGroupConfiguration(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller {
return api.ClusterToConfigFiller(buildCloudStackWorkerNodeGroupClusterFiller(name, workerNodeGroup))
}
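// A sketch of adding a worker node group to an existing test cluster config.
// test and workerGroup are hypothetical values owned by the calling test:
//
//	test.UpdateClusterConfig(
//		cloudstack.WithNewWorkerNodeGroup("md-1", workerGroup),
//	)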
func cloudStackMachineConfig(name string, fillers ...api.CloudStackMachineConfigFiller) api.CloudStackFiller {
f := make([]api.CloudStackMachineConfigFiller, 0, len(fillers)+2)
// Need to add these because at this point the default fillers that assign these
	// values to all machines have already run
f = append(f,
api.WithCloudStackComputeOffering(os.Getenv(cloudstackComputeOfferingLargeVar)),
api.WithCloudStackSSHKey(os.Getenv(cloudstackSshAuthorizedKeyVar)),
)
f = append(f, fillers...)
return api.WithCloudStackMachineConfig(name, f...)
}
// Redhat123Template returns a CloudStack filler for the 1.23 RedHat template.
func (c *CloudStack) Redhat123Template() api.CloudStackFiller {
return api.WithCloudStackStringFromEnvVar(cloudstackTemplateRedhat123Var, api.WithCloudStackTemplateForAllMachines)
}
// Redhat124Template returns a CloudStack filler for the 1.24 RedHat template.
func (c *CloudStack) Redhat124Template() api.CloudStackFiller {
return api.WithCloudStackStringFromEnvVar(cloudstackTemplateRedhat124Var, api.WithCloudStackTemplateForAllMachines)
}
func buildCloudStackWorkerNodeGroupClusterFiller(machineConfigName string, workerNodeGroup *WorkerNodeGroup) api.ClusterFiller {
// Set worker node group ref to cloudstack machine config
workerNodeGroup.MachineConfigKind = anywherev1.CloudStackMachineConfigKind
workerNodeGroup.MachineConfigName = machineConfigName
return workerNodeGroup.ClusterFiller()
}
// ClusterStateValidations returns a list of provider specific validations.
func (c *CloudStack) ClusterStateValidations() []clusterf.StateValidation {
return []clusterf.StateValidation{
clusterf.RetriableStateValidation(
retrier.NewWithMaxRetries(60, 5*time.Second),
validations.ValidateAvailabilityZones,
),
}
}
// WithRedhat123 returns a cluster config filler that sets the kubernetes version of the cluster to 1.23
// as well as the right redhat template for all CloudStackMachineConfigs.
func (c *CloudStack) WithRedhat123() api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(api.WithKubernetesVersion(anywherev1.Kube123)),
api.CloudStackToConfigFiller(
UpdateRedhatTemplate123Var(),
),
)
}
// WithRedhat124 returns a cluster config filler that sets the kubernetes version of the cluster to 1.24
// as well as the right redhat template for all CloudStackMachineConfigs.
func (c *CloudStack) WithRedhat124() api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(api.WithKubernetesVersion(anywherev1.Kube124)),
api.CloudStackToConfigFiller(
UpdateRedhatTemplate124Var(),
),
)
}
// WithRedhatVersion returns a cluster config filler that sets the kubernetes version of the cluster to the k8s
// version provided, as well as the right redhat template for all CloudStackMachineConfigs.
func (c *CloudStack) WithRedhatVersion(version anywherev1.KubernetesVersion) api.ClusterConfigFiller {
switch version {
case anywherev1.Kube123:
return c.WithRedhat123()
case anywherev1.Kube124:
return c.WithRedhat124()
default:
return nil
}
}
func (c *CloudStack) getDevRelease() *releasev1.EksARelease {
c.t.Helper()
if c.devRelease == nil {
latestRelease, err := getLatestDevRelease()
if err != nil {
c.t.Fatal(err)
}
c.devRelease = latestRelease
}
return c.devRelease
}
func (c *CloudStack) templateForDevRelease(osFamily anywherev1.OSFamily, kubeVersion anywherev1.KubernetesVersion) string {
c.t.Helper()
return c.templatesRegistry.templateForRelease(c.t, osFamily, c.getDevRelease(), kubeVersion)
}
// envVarForTemplate looks for explicit configuration through an env var: "T_CLOUDSTACK_TEMPLATE_{osFamily}_{eks-d version}"
// eg: T_CLOUDSTACK_TEMPLATE_REDHAT_KUBERNETES_1_23_EKS_22.
func (c *CloudStack) envVarForTemplate(osFamily, eksDName string) string {
return fmt.Sprintf("T_CLOUDSTACK_TEMPLATE_%s_%s", strings.ToUpper(osFamily), strings.ToUpper(strings.ReplaceAll(eksDName, "-", "_")))
}
// defaultNameForTemplate looks for a template: "{eks-d version}-{osFamily}"
// eg: kubernetes-1-23-eks-22-redhat.
func (c *CloudStack) defaultNameForTemplate(osFamily, eksDName string) string {
return filepath.Join(fmt.Sprintf("%s-%s", strings.ToLower(eksDName), strings.ToLower(osFamily)))
}
// defaultEnvVarForTemplate returns the value of the default template env vars: "T_CLOUDSTACK_TEMPLATE_{osFamily}_{kubeVersion}"
// eg. T_CLOUDSTACK_TEMPLATE_REDHAT_1_23.
func (c *CloudStack) defaultEnvVarForTemplate(osFamily string, kubeVersion anywherev1.KubernetesVersion) string {
return fmt.Sprintf("T_CLOUDSTACK_TEMPLATE_%s_%s", strings.ToUpper(osFamily), strings.ReplaceAll(string(kubeVersion), ".", "_"))
}
// searchTemplate returns the template name if the given template exists in the datacenter.
func (c *CloudStack) searchTemplate(ctx context.Context, template string) (string, error) {
	profile, ok := os.LookupEnv(cloudstackCredentialsVar)
	if !ok {
		return "", fmt.Errorf("required environment variable for CloudStack not set: %s", cloudstackCredentialsVar)
	}
	templateResource := v1alpha1.CloudStackResourceIdentifier{
		Name: template,
	}
	template, err := c.cmkClient.SearchTemplate(ctx, profile, templateResource)
	if err != nil {
		return "", err
	}
	return template, nil
}
// WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all
// CloudStackMachineConfigs.
func (c *CloudStack) WithKubeVersionAndOS(osFamily anywherev1.OSFamily, kubeVersion anywherev1.KubernetesVersion) api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)),
api.CloudStackToConfigFiller(
api.WithCloudStackTemplateForAllMachines(c.templateForDevRelease(osFamily, kubeVersion)),
),
)
}
package framework
import (
"bufio"
"bytes"
"context"
"crypto/sha1"
_ "embed"
"encoding/json"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
rapi "github.com/tinkerbell/rufio/api/v1alpha1"
rctrl "github.com/tinkerbell/rufio/controllers"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/git"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
)
const (
defaultClusterConfigFile = "cluster.yaml"
defaultBundleReleaseManifestFile = "bin/local-bundle-release.yaml"
defaultEksaBinaryLocation = "eksctl anywhere"
defaultClusterName = "eksa-test"
defaultDownloadArtifactsOutputLocation = "eks-anywhere-downloads.tar.gz"
defaultDownloadImagesOutputLocation = "images.tar"
eksctlVersionEnvVar = "EKSCTL_VERSION"
eksctlVersionEnvVarDummyVal = "ham sandwich"
ClusterPrefixVar = "T_CLUSTER_PREFIX"
JobIdVar = "T_JOB_ID"
BundlesOverrideVar = "T_BUNDLES_OVERRIDE"
ClusterIPPoolEnvVar = "T_CLUSTER_IP_POOL"
CleanupVmsVar = "T_CLEANUP_VMS"
hardwareYamlPath = "hardware.yaml"
hardwareCsvPath = "hardware.csv"
EksaPackagesInstallation = "eks-anywhere-packages"
)
//go:embed testdata/oidc-roles.yaml
var oidcRoles []byte
//go:embed testdata/hpa_busybox.yaml
var hpaBusybox []byte
type ClusterE2ETest struct {
T T
ClusterConfigLocation string
ClusterConfigFolder string
HardwareConfigLocation string
HardwareCsvLocation string
TestHardware map[string]*api.Hardware
HardwarePool map[string]*api.Hardware
WithNoPowerActions bool
ClusterName string
ClusterConfig *cluster.Config
clusterStateValidationConfig *clusterf.StateValidationConfig
Provider Provider
// TODO(g-gaston): migrate uses of clusterFillers to clusterConfigFillers
clusterFillers []api.ClusterFiller
clusterConfigFillers []api.ClusterConfigFiller
KubectlClient *executables.Kubectl
GitProvider git.ProviderClient
GitClient git.Client
HelmInstallConfig *HelmInstallConfig
PackageConfig *PackageConfig
GitWriter filewriter.FileWriter
eksaBinaryLocation string
ExpectFailure bool
// PersistentCluster avoids creating the clusters if it finds a kubeconfig
// in the corresponding cluster folder. Useful for local development of tests.
// When generating a new base cluster config, it will read from disk instead of
// using the CLI generate command and will preserve the previous CP endpoint.
PersistentCluster bool
}
type ClusterE2ETestOpt func(e *ClusterE2ETest)
// NewClusterE2ETest is a support structure for defining an end-to-end test.
func NewClusterE2ETest(t T, provider Provider, opts ...ClusterE2ETestOpt) *ClusterE2ETest {
e := &ClusterE2ETest{
T: t,
Provider: provider,
ClusterConfig: &cluster.Config{},
ClusterConfigLocation: defaultClusterConfigFile,
ClusterName: getClusterName(t),
clusterFillers: make([]api.ClusterFiller, 0),
KubectlClient: buildKubectl(t),
eksaBinaryLocation: defaultEksaBinaryLocation,
}
for _, opt := range opts {
opt(e)
}
if e.ClusterConfigFolder == "" {
e.ClusterConfigFolder = e.ClusterName
}
if e.HardwareConfigLocation == "" {
e.HardwareConfigLocation = filepath.Join(e.ClusterConfigFolder, hardwareYamlPath)
}
if e.HardwareCsvLocation == "" {
e.HardwareCsvLocation = filepath.Join(e.ClusterConfigFolder, hardwareCsvPath)
}
e.ClusterConfigLocation = filepath.Join(e.ClusterConfigFolder, e.ClusterName+"-eks-a.yaml")
if err := os.MkdirAll(e.ClusterConfigFolder, os.ModePerm); err != nil {
t.Fatalf("Failed creating cluster config folder for test: %s", err)
}
provider.Setup()
e.T.Cleanup(func() {
e.CleanupVms()
tinkerbellCIEnvironment := os.Getenv(TinkerbellCIEnvironment)
if e.Provider.Name() == TinkerbellProviderName && tinkerbellCIEnvironment == "true" {
e.CleanupDockerEnvironment()
}
})
return e
}
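// A minimal usage sketch. provider is any concrete Provider implementation
// owned by the calling test; the Kubernetes version is illustrative:
//
//	test := NewClusterE2ETest(
//		t,
//		provider,
//		WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube124)),
//	)
//	test.GenerateClusterConfig()
//	test.CreateCluster()
//	test.DeleteCluster()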
func withHardware(requiredCount int, hardwareType string, labels map[string]string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
hardwarePool := e.GetHardwarePool()
if e.TestHardware == nil {
e.TestHardware = make(map[string]*api.Hardware)
}
var count int
for id, h := range hardwarePool {
if _, exists := e.TestHardware[id]; !exists {
count++
h.Labels = labels
e.TestHardware[id] = h
}
if count == requiredCount {
break
}
}
if count < requiredCount {
e.T.Errorf("this test requires at least %d piece(s) of %s hardware", requiredCount, hardareType)
}
}
}
func WithNoPowerActions() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.WithNoPowerActions = true
}
}
func ExpectFailure(expected bool) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.ExpectFailure = expected
}
}
func WithControlPlaneHardware(requiredCount int) ClusterE2ETestOpt {
return withHardware(
requiredCount,
api.ControlPlane,
map[string]string{api.HardwareLabelTypeKeyName: api.ControlPlane},
)
}
func WithWorkerHardware(requiredCount int) ClusterE2ETestOpt {
return withHardware(requiredCount, api.Worker, map[string]string{api.HardwareLabelTypeKeyName: api.Worker})
}
func WithCustomLabelHardware(requiredCount int, label string) ClusterE2ETestOpt {
return withHardware(requiredCount, api.Worker, map[string]string{api.HardwareLabelTypeKeyName: label})
}
func WithExternalEtcdHardware(requiredCount int) ClusterE2ETestOpt {
return withHardware(
requiredCount,
api.ExternalEtcd,
map[string]string{api.HardwareLabelTypeKeyName: api.ExternalEtcd},
)
}
// WithClusterName sets the name that will be used for the cluster. This will drive both the name of the eks-a
// cluster config objects as well as the cluster config file name.
func WithClusterName(name string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.ClusterName = name
}
}
// PersistentCluster avoids creating the clusters if it finds a kubeconfig
// in the corresponding cluster folder. Useful for local development of tests.
func PersistentCluster() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.PersistentCluster = true
}
}
func (e *ClusterE2ETest) GetHardwarePool() map[string]*api.Hardware {
if e.HardwarePool == nil {
csvFilePath := os.Getenv(tinkerbellInventoryCsvFilePathEnvVar)
var err error
e.HardwarePool, err = api.NewHardwareMapFromFile(csvFilePath)
if err != nil {
e.T.Fatalf("failed to create hardware map from test hardware pool: %v", err)
}
}
return e.HardwarePool
}
func (e *ClusterE2ETest) RunClusterFlowWithGitOps(clusterOpts ...ClusterE2ETestOpt) {
e.GenerateClusterConfig()
e.createCluster()
e.UpgradeWithGitOps(clusterOpts...)
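	// Give the GitOps controller time to reconcile the upgrade before deleting the cluster (assumed intent of this sleep).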
time.Sleep(5 * time.Minute)
e.deleteCluster()
}
func WithClusterFiller(f ...api.ClusterFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.clusterFillers = append(e.clusterFillers, f...)
}
}
// WithClusterSingleNode helps to create an e2e test option for a single node cluster.
func WithClusterSingleNode(v v1alpha1.KubernetesVersion) ClusterE2ETestOpt {
return WithClusterFiller(
api.WithKubernetesVersion(v),
api.WithControlPlaneCount(1),
api.WithEtcdCountIfExternal(0),
api.RemoveAllWorkerNodeGroups(),
)
}
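// For example, a single-node cluster on Kubernetes 1.24 (sketch; provider is a
// hypothetical concrete Provider):
//
//	test := NewClusterE2ETest(t, provider, WithClusterSingleNode(v1alpha1.Kube124))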
func WithClusterConfigLocationOverride(path string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.ClusterConfigLocation = path
}
}
func WithEksaVersion(version *semver.Version) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
eksaBinaryLocation, err := GetReleaseBinaryFromVersion(version)
if err != nil {
e.T.Fatal(err)
}
e.eksaBinaryLocation = eksaBinaryLocation
err = setEksctlVersionEnvVar()
if err != nil {
e.T.Fatal(err)
}
}
}
func WithLatestMinorReleaseFromMain() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
eksaBinaryLocation, err := GetLatestMinorReleaseBinaryFromMain()
if err != nil {
e.T.Fatal(err)
}
e.eksaBinaryLocation = eksaBinaryLocation
err = setEksctlVersionEnvVar()
if err != nil {
e.T.Fatal(err)
}
}
}
func WithEnvVar(key, val string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
err := os.Setenv(key, val)
if err != nil {
e.T.Fatalf("couldn't set env var %s to value %s due to: %v", key, val, err)
}
}
}
type Provider interface {
Name() string
// ClusterConfigUpdates allows a provider to modify the default cluster config
	// after it is generated for the first time. These updates are not reapplied on every CLI operation.
// Prefer to call UpdateClusterConfig directly from the tests to make it more explicit.
ClusterConfigUpdates() []api.ClusterConfigFiller
Setup()
CleanupVMs(clusterName string) error
UpdateKubeConfig(content *[]byte, clusterName string) error
ClusterStateValidations() []clusterf.StateValidation
WithKubeVersionAndOS(osFamily v1alpha1.OSFamily, kubeVersion v1alpha1.KubernetesVersion) api.ClusterConfigFiller
WithNewWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller
}
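// Tests normally hand a concrete implementation (such as the CloudStack type
// in this package) to NewClusterE2ETest, which drives these hooks. A test can
// also use the provider directly to mutate the cluster config (sketch):
//
//	test.UpdateClusterConfig(provider.WithKubeVersionAndOS(v1alpha1.RedHat, v1alpha1.Kube124))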
func (e *ClusterE2ETest) GenerateClusterConfig(opts ...CommandOpt) {
e.GenerateClusterConfigForVersion("", opts...)
}
func (e *ClusterE2ETest) PowerOffHardware() {
// Initializing BMC Client
ctx := context.Background()
bmcClientFactory := rctrl.NewBMCClientFactoryFunc(ctx)
for _, h := range e.TestHardware {
bmcClient, err := bmcClientFactory(ctx, h.BMCIPAddress, "623", h.BMCUsername, h.BMCPassword)
if err != nil {
e.T.Fatalf("failed to create bmc client: %v", err)
}
defer func() {
			// Close BMC connection after reconciliation
err = bmcClient.Close(ctx)
if err != nil {
e.T.Fatalf("BMC close connection failed: %v", err)
}
}()
_, err = bmcClient.SetPowerState(ctx, string(rapi.Off))
if err != nil {
e.T.Fatalf("failed to power off hardware: %v", err)
}
}
}
func (e *ClusterE2ETest) PXEBootHardware() {
// Initializing BMC Client
ctx := context.Background()
bmcClientFactory := rctrl.NewBMCClientFactoryFunc(ctx)
for _, h := range e.TestHardware {
bmcClient, err := bmcClientFactory(ctx, h.BMCIPAddress, "623", h.BMCUsername, h.BMCPassword)
if err != nil {
e.T.Fatalf("failed to create bmc client: %v", err)
}
defer func() {
			// Close BMC connection after reconciliation
err = bmcClient.Close(ctx)
if err != nil {
e.T.Fatalf("BMC close connection failed: %v", err)
}
}()
_, err = bmcClient.SetBootDevice(ctx, string(rapi.PXE), false, true)
if err != nil {
e.T.Fatalf("failed to pxe boot hardware: %v", err)
}
}
}
func (e *ClusterE2ETest) PowerOnHardware() {
// Initializing BMC Client
ctx := context.Background()
bmcClientFactory := rctrl.NewBMCClientFactoryFunc(ctx)
for _, h := range e.TestHardware {
bmcClient, err := bmcClientFactory(ctx, h.BMCIPAddress, "623", h.BMCUsername, h.BMCPassword)
if err != nil {
e.T.Fatalf("failed to create bmc client: %v", err)
}
defer func() {
			// Close BMC connection after reconciliation
err = bmcClient.Close(ctx)
if err != nil {
e.T.Fatalf("BMC close connection failed: %v", err)
}
}()
_, err = bmcClient.SetPowerState(ctx, string(rapi.On))
if err != nil {
e.T.Fatalf("failed to power on hardware: %v", err)
}
}
}
func (e *ClusterE2ETest) ValidateHardwareDecommissioned() {
// Initializing BMC Client
ctx := context.Background()
bmcClientFactory := rctrl.NewBMCClientFactoryFunc(ctx)
var failedToDecomm []*api.Hardware
for _, h := range e.TestHardware {
bmcClient, err := bmcClientFactory(ctx, h.BMCIPAddress, "443", h.BMCUsername, h.BMCPassword)
if err != nil {
e.T.Fatalf("failed to create bmc client: %v", err)
}
defer func() {
			// Close BMC connection after reconciliation
err = bmcClient.Close(ctx)
if err != nil {
e.T.Fatalf("BMC close connection failed: %v", err)
}
}()
powerState, err := bmcClient.GetPowerState(ctx)
// add sleep retries to give the machine time to power off
timeout := 15
for !strings.EqualFold(powerState, string(rapi.Off)) && timeout > 0 {
if err != nil {
e.T.Logf("failed to get power state for hardware (%v): %v", h, err)
}
time.Sleep(5 * time.Second)
timeout = timeout - 5
powerState, err = bmcClient.GetPowerState(ctx)
e.T.Logf(
"hardware power state (id=%s, hostname=%s, bmc_ip=%s): power_state=%s",
h.MACAddress,
h.Hostname,
h.BMCIPAddress,
powerState,
)
}
if !strings.EqualFold(powerState, string(rapi.Off)) {
e.T.Logf(
"failed to decommission hardware: id=%s, hostname=%s, bmc_ip=%s",
h.MACAddress,
h.Hostname,
h.BMCIPAddress,
)
failedToDecomm = append(failedToDecomm, h)
} else {
e.T.Logf("successfully decommissioned hardware: id=%s, hostname=%s, bmc_ip=%s", h.MACAddress, h.Hostname, h.BMCIPAddress)
}
}
if len(failedToDecomm) > 0 {
e.T.Fatalf("failed to decommision hardware during cluster deletion")
}
}
func (e *ClusterE2ETest) GenerateHardwareConfig(opts ...CommandOpt) {
e.generateHardwareConfig(opts...)
}
func (e *ClusterE2ETest) generateHardwareConfig(opts ...CommandOpt) {
if len(e.TestHardware) == 0 {
e.T.Fatal("you must provide the ClusterE2ETest the hardware to use for the test run")
}
if _, err := os.Stat(e.HardwareCsvLocation); err == nil {
os.Remove(e.HardwareCsvLocation)
}
testHardware := e.TestHardware
if e.WithNoPowerActions {
hardwareWithNoBMC := make(map[string]*api.Hardware)
for k, h := range testHardware {
lessBmc := *h
lessBmc.BMCIPAddress = ""
lessBmc.BMCUsername = ""
lessBmc.BMCPassword = ""
hardwareWithNoBMC[k] = &lessBmc
}
testHardware = hardwareWithNoBMC
}
err := api.WriteHardwareMapToCSV(testHardware, e.HardwareCsvLocation)
if err != nil {
e.T.Fatalf("failed to create hardware csv for the test run: %v", err)
}
generateHardwareConfigArgs := []string{
"generate", "hardware",
"-z", e.HardwareCsvLocation,
"-o", e.HardwareConfigLocation,
}
e.RunEKSA(generateHardwareConfigArgs, opts...)
}
func (e *ClusterE2ETest) GenerateClusterConfigForVersion(eksaVersion string, opts ...CommandOpt) {
e.generateClusterConfigObjects(opts...)
if eksaVersion != "" {
err := cleanUpClusterForVersion(e.ClusterConfig, eksaVersion)
if err != nil {
e.T.Fatal(err)
}
}
e.buildClusterConfigFile()
}
func (e *ClusterE2ETest) generateClusterConfigObjects(opts ...CommandOpt) {
e.generateClusterConfigWithCLI(opts...)
config, err := cluster.ParseConfigFromFile(e.ClusterConfigLocation)
if err != nil {
e.T.Fatalf("Failed parsing generated cluster config: %s", err)
}
// Copy all objects that might be generated by the CLI.
// Don't replace the whole ClusterConfig since some ClusterE2ETestOpt might
// have already set some data in it.
e.ClusterConfig.Cluster = config.Cluster
e.ClusterConfig.CloudStackDatacenter = config.CloudStackDatacenter
e.ClusterConfig.VSphereDatacenter = config.VSphereDatacenter
e.ClusterConfig.DockerDatacenter = config.DockerDatacenter
e.ClusterConfig.SnowDatacenter = config.SnowDatacenter
e.ClusterConfig.NutanixDatacenter = config.NutanixDatacenter
e.ClusterConfig.TinkerbellDatacenter = config.TinkerbellDatacenter
e.ClusterConfig.VSphereMachineConfigs = config.VSphereMachineConfigs
e.ClusterConfig.CloudStackMachineConfigs = config.CloudStackMachineConfigs
e.ClusterConfig.SnowMachineConfigs = config.SnowMachineConfigs
e.ClusterConfig.SnowIPPools = config.SnowIPPools
e.ClusterConfig.NutanixMachineConfigs = config.NutanixMachineConfigs
e.ClusterConfig.TinkerbellMachineConfigs = config.TinkerbellMachineConfigs
e.ClusterConfig.TinkerbellTemplateConfigs = config.TinkerbellTemplateConfigs
e.UpdateClusterConfig(e.baseClusterConfigUpdates()...)
}
// UpdateClusterConfig applies the provided updates to e.ClusterConfig, marshals its content
// to yaml and writes it to the file on disk configured by e.ClusterConfigLocation. Call this method when you
// want to make changes to the eks-a cluster definition before running a CLI command or API operation.
func (e *ClusterE2ETest) UpdateClusterConfig(fillers ...api.ClusterConfigFiller) {
e.T.Log("Updating cluster config")
api.UpdateClusterConfig(e.ClusterConfig, fillers...)
e.T.Logf("Writing cluster config to file: %s", e.ClusterConfigLocation)
e.buildClusterConfigFile()
}
func (e *ClusterE2ETest) baseClusterConfigUpdates(opts ...CommandOpt) []api.ClusterConfigFiller {
clusterFillers := make([]api.ClusterFiller, 0, len(e.clusterFillers)+3)
// This defaults all tests to a 1:1:1 configuration. Since all the fillers defined on each test are run
	// after these 3, if the test is explicit about any of them, the defaults will be overwritten
clusterFillers = append(clusterFillers,
api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithEtcdCountIfExternal(1),
)
clusterFillers = append(clusterFillers, e.clusterFillers...)
configFillers := make([]api.ClusterConfigFiller, 0, len(e.clusterConfigFillers)+1)
configFillers = append(configFillers, api.ClusterToConfigFiller(clusterFillers...))
configFillers = append(configFillers, e.clusterConfigFillers...)
configFillers = append(configFillers, e.Provider.ClusterConfigUpdates()...)
// If we are persisting an existing cluster, set the control plane endpoint back to the original, since
// it is immutable
if e.ClusterConfig.Cluster.Spec.DatacenterRef.Kind != v1alpha1.DockerDatacenterKind && e.PersistentCluster && e.ClusterConfig.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host != "" {
endpoint := e.ClusterConfig.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host
e.T.Logf("Resetting CP endpoint for persistent cluster to %s", endpoint)
configFillers = append(configFillers,
api.ClusterToConfigFiller(api.WithControlPlaneEndpointIP(endpoint)),
)
}
return configFillers
}
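// Because the per-test fillers run after the defaults above, a test overrides
// the 1:1:1 topology simply by being explicit (sketch):
//
//	NewClusterE2ETest(t, provider,
//		WithClusterFiller(api.WithControlPlaneCount(3), api.WithWorkerNodeCount(2)),
//	)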
func (e *ClusterE2ETest) generateClusterConfigWithCLI(opts ...CommandOpt) {
if e.PersistentCluster && fileExists(e.ClusterConfigLocation) {
e.T.Log("Skipping CLI cluster generation since this is a persistent cluster that already had one cluster config generated")
return
}
generateClusterConfigArgs := []string{"generate", "clusterconfig", e.ClusterName, "-p", e.Provider.Name(), ">", e.ClusterConfigLocation}
e.RunEKSA(generateClusterConfigArgs, opts...)
e.T.Log("Cluster config generated with CLI")
}
func (e *ClusterE2ETest) parseClusterConfigFromDisk(file string) {
e.T.Logf("Parsing cluster config from disk: %s", file)
config, err := cluster.ParseConfigFromFile(file)
if err != nil {
e.T.Fatalf("Failed parsing generated cluster config: %s", err)
}
e.ClusterConfig = config
}
// WithClusterConfig generates a base cluster config using the CLI `generate clusterconfig` command
// and updates them with the provided fillers. Helpful for defining the initial Cluster config
// before running a create operation.
func (e *ClusterE2ETest) WithClusterConfig(fillers ...api.ClusterConfigFiller) *ClusterE2ETest {
e.T.Logf("Generating base config for cluster %s", e.ClusterName)
e.generateClusterConfigWithCLI()
e.parseClusterConfigFromDisk(e.ClusterConfigLocation)
base := e.baseClusterConfigUpdates()
allUpdates := make([]api.ClusterConfigFiller, 0, len(base)+len(fillers))
allUpdates = append(allUpdates, base...)
allUpdates = append(allUpdates, fillers...)
e.UpdateClusterConfig(allUpdates...)
return e
}
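// A hypothetical create flow built on WithClusterConfig; the OS family and
// Kubernetes version are illustrative:
//
//	test.WithClusterConfig(
//		provider.WithKubeVersionAndOS(v1alpha1.RedHat, v1alpha1.Kube124),
//	).CreateCluster()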
// DownloadArtifacts runs the EKS-A `download artifacts` command with appropriate args.
func (e *ClusterE2ETest) DownloadArtifacts(opts ...CommandOpt) {
downloadArtifactsArgs := []string{"download", "artifacts", "-f", e.ClusterConfigLocation}
if getBundlesOverride() == "true" {
downloadArtifactsArgs = append(downloadArtifactsArgs, "--bundles-override", defaultBundleReleaseManifestFile)
}
e.RunEKSA(downloadArtifactsArgs, opts...)
if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err != nil {
e.T.Fatal(err)
} else {
e.T.Logf("Downloaded artifacts tarball saved at %s", defaultDownloadArtifactsOutputLocation)
}
}
// ExtractDownloadedArtifacts extracts the downloaded artifacts.
func (e *ClusterE2ETest) ExtractDownloadedArtifacts(opts ...CommandOpt) {
e.T.Log("Extracting downloaded artifacts")
e.Run("tar", "-xf", defaultDownloadArtifactsOutputLocation)
}
// CleanupDownloadedArtifactsAndImages cleans up the downloaded artifacts and images.
func (e *ClusterE2ETest) CleanupDownloadedArtifactsAndImages(opts ...CommandOpt) {
e.T.Log("Cleaning up downloaded artifacts and images")
e.Run("rm", "-rf", defaultDownloadArtifactsOutputLocation, defaultDownloadImagesOutputLocation)
}
// DownloadImages runs the EKS-A `download images` command with appropriate args.
func (e *ClusterE2ETest) DownloadImages(opts ...CommandOpt) {
downloadImagesArgs := []string{"download", "images", "-o", defaultDownloadImagesOutputLocation}
if getBundlesOverride() == "true" {
var bundleManifestLocation string
if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err == nil {
bundleManifestLocation = "eks-anywhere-downloads/bundle-release.yaml"
} else {
bundleManifestLocation = defaultBundleReleaseManifestFile
}
downloadImagesArgs = append(downloadImagesArgs, "--bundles-override", bundleManifestLocation)
}
e.RunEKSA(downloadImagesArgs, opts...)
if _, err := os.Stat(defaultDownloadImagesOutputLocation); err != nil {
e.T.Fatal(err)
} else {
e.T.Logf("Downloaded images archive saved at %s", defaultDownloadImagesOutputLocation)
}
}
// ImportImages runs the EKS-A `import images` command with appropriate args.
func (e *ClusterE2ETest) ImportImages(opts ...CommandOpt) {
clusterConfig := e.ClusterConfig.Cluster
	registryMirrorEndpoint, registryMirrorPort := clusterConfig.Spec.RegistryMirrorConfiguration.Endpoint, clusterConfig.Spec.RegistryMirrorConfiguration.Port
	registryMirrorHost := net.JoinHostPort(registryMirrorEndpoint, registryMirrorPort)
var bundleManifestLocation string
if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err == nil {
bundleManifestLocation = "eks-anywhere-downloads/bundle-release.yaml"
} else {
bundleManifestLocation = defaultBundleReleaseManifestFile
}
importImagesArgs := []string{"import images", "--input", defaultDownloadImagesOutputLocation, "--bundles", bundleManifestLocation, "--registry", registryMirrorHost, "--insecure"}
e.RunEKSA(importImagesArgs, opts...)
}
// ChangeInstanceSecurityGroup modifies the security group of the instance to the provided value.
func (e *ClusterE2ETest) ChangeInstanceSecurityGroup(securityGroup string) {
e.T.Logf("Changing instance security group to %s", securityGroup)
e.Run(fmt.Sprintf("INSTANCE_ID=$(ec2-metadata -i | awk '{print $2}') && aws ec2 modify-instance-attribute --instance-id $INSTANCE_ID --groups %s", securityGroup))
}
func (e *ClusterE2ETest) CreateCluster(opts ...CommandOpt) {
e.setFeatureFlagForUnreleasedKubernetesVersion(e.ClusterConfig.Cluster.Spec.KubernetesVersion)
e.createCluster(opts...)
}
func (e *ClusterE2ETest) createCluster(opts ...CommandOpt) {
if e.PersistentCluster {
if fileExists(e.KubeconfigFilePath()) {
e.T.Logf("Persisent cluster: kubeconfig found for cluster %s, skipping cluster creation", e.ClusterName)
return
}
}
e.T.Logf("Creating cluster %s", e.ClusterName)
createClusterArgs := []string{"create", "cluster", "-f", e.ClusterConfigLocation, "-v", "12", "--force-cleanup"}
dumpFile("Create cluster from file:", e.ClusterConfigLocation, e.T)
if getBundlesOverride() == "true" {
createClusterArgs = append(createClusterArgs, "--bundles-override", defaultBundleReleaseManifestFile)
}
if e.Provider.Name() == TinkerbellProviderName {
createClusterArgs = append(createClusterArgs, "-z", e.HardwareCsvLocation)
dumpFile("Hardware csv file:", e.HardwareCsvLocation, e.T)
tinkBootstrapIP := os.Getenv(tinkerbellBootstrapIPEnvVar)
e.T.Logf("tinkBootstrapIP: %s", tinkBootstrapIP)
if tinkBootstrapIP != "" {
createClusterArgs = append(createClusterArgs, "--tinkerbell-bootstrap-ip", tinkBootstrapIP)
}
}
e.RunEKSA(createClusterArgs, opts...)
}
func (e *ClusterE2ETest) ValidateCluster(kubeVersion v1alpha1.KubernetesVersion) {
ctx := context.Background()
e.T.Log("Validating cluster node status")
r := retrier.New(10 * time.Minute)
err := r.Retry(func() error {
err := e.KubectlClient.ValidateNodes(ctx, e.Cluster().KubeconfigFile)
if err != nil {
return fmt.Errorf("validating nodes status: %v", err)
}
return nil
})
if err != nil {
e.T.Fatal(err)
}
e.T.Log("Validating cluster node version")
err = retrier.Retry(180, 1*time.Second, func() error {
if err = e.KubectlClient.ValidateNodesVersion(ctx, e.Cluster().KubeconfigFile, kubeVersion); err != nil {
return fmt.Errorf("validating nodes version: %v", err)
}
return nil
})
if err != nil {
e.T.Fatal(err)
}
}
func (e *ClusterE2ETest) WaitForMachineDeploymentReady(machineDeploymentName string) {
ctx := context.Background()
e.T.Logf("Waiting for machine deployment %s to be ready for cluster %s", machineDeploymentName, e.ClusterName)
err := e.KubectlClient.WaitForMachineDeploymentReady(ctx, e.Cluster(), "5m", machineDeploymentName)
if err != nil {
e.T.Fatal(err)
}
}
// GetEKSACluster retrieves the EKSA cluster from the runtime environment using kubectl.
func (e *ClusterE2ETest) GetEKSACluster() *v1alpha1.Cluster {
ctx := context.Background()
clus, err := e.KubectlClient.GetEksaCluster(ctx, e.Cluster(), e.ClusterName)
if err != nil {
e.T.Fatal(err)
}
return clus
}
func (e *ClusterE2ETest) GetCapiMachinesForCluster(clusterName string) map[string]types.Machine {
machines, err := e.CapiMachinesForCluster(clusterName)
if err != nil {
e.T.Fatal(err)
}
return machines
}
// CapiMachinesForCluster reads all the CAPI Machines for a particular cluster and returns them
// indexed by their name.
func (e *ClusterE2ETest) CapiMachinesForCluster(clusterName string) (map[string]types.Machine, error) {
ctx := context.Background()
capiMachines, err := e.KubectlClient.GetMachines(ctx, e.Cluster(), clusterName)
if err != nil {
return nil, err
}
machinesMap := make(map[string]types.Machine, 0)
for _, machine := range capiMachines {
machinesMap[machine.Metadata.Name] = machine
}
return machinesMap, nil
}
// ApplyClusterManifest uses client-side logic to create/update objects defined in a cluster yaml manifest.
func (e *ClusterE2ETest) ApplyClusterManifest() {
ctx := context.Background()
e.T.Logf("Applying cluster %s spec located at %s", e.ClusterName, e.ClusterConfigLocation)
e.applyClusterManifest(ctx)
}
func (e *ClusterE2ETest) applyClusterManifest(ctx context.Context) {
if err := e.KubectlClient.ApplyManifest(ctx, e.KubeconfigFilePath(), e.ClusterConfigLocation); err != nil {
e.T.Fatalf("Failed to apply cluster config: %s", err)
}
}
// WithClusterUpgrade returns a test option that applies the given cluster fillers to the cluster config in preparation for an upgrade.
func WithClusterUpgrade(fillers ...api.ClusterFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(api.ClusterToConfigFiller(fillers...))
}
}
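// Typically consumed through UpgradeClusterWithNewConfig (sketch; the target
// version is illustrative):
//
//	test.UpgradeClusterWithNewConfig([]ClusterE2ETestOpt{
//		WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube124)),
//	})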
// LoadClusterConfigGeneratedByCLI loads the full cluster config from the file generated when a cluster is created using the CLI.
func (e *ClusterE2ETest) LoadClusterConfigGeneratedByCLI(fillers ...api.ClusterConfigFiller) {
fullClusterConfigLocation := filepath.Join(e.ClusterConfigFolder, e.ClusterName+"-eks-a-cluster.yaml")
e.parseClusterConfigFromDisk(fullClusterConfigLocation)
}
// UpgradeClusterWithNewConfig applies the test options, re-generates the cluster config file and runs the CLI upgrade command.
func (e *ClusterE2ETest) UpgradeClusterWithNewConfig(clusterOpts []ClusterE2ETestOpt, commandOpts ...CommandOpt) {
e.upgradeCluster(clusterOpts, commandOpts...)
}
func (e *ClusterE2ETest) upgradeCluster(clusterOpts []ClusterE2ETestOpt, commandOpts ...CommandOpt) {
for _, opt := range clusterOpts {
opt(e)
}
e.buildClusterConfigFile()
e.setFeatureFlagForUnreleasedKubernetesVersion(e.ClusterConfig.Cluster.Spec.KubernetesVersion)
e.UpgradeCluster(commandOpts...)
}
// UpgradeCluster runs the CLI upgrade command.
func (e *ClusterE2ETest) UpgradeCluster(commandOpts ...CommandOpt) {
upgradeClusterArgs := []string{"upgrade", "cluster", "-f", e.ClusterConfigLocation, "-v", "4"}
if getBundlesOverride() == "true" {
upgradeClusterArgs = append(upgradeClusterArgs, "--bundles-override", defaultBundleReleaseManifestFile)
}
e.RunEKSA(upgradeClusterArgs, commandOpts...)
}
func (e *ClusterE2ETest) generateClusterConfigYaml() []byte {
childObjs := e.ClusterConfig.ChildObjects()
yamlB := make([][]byte, 0, len(childObjs)+1)
if e.PackageConfig != nil {
e.ClusterConfig.Cluster.Spec.Packages = e.PackageConfig.packageConfiguration
}
// This is required because Flux requires a namespace be specified for objects
	// to reconcile correctly.
if e.ClusterConfig.Cluster.Namespace == "" {
e.ClusterConfig.Cluster.Namespace = "default"
}
clusterConfigB, err := yaml.Marshal(e.ClusterConfig.Cluster)
if err != nil {
e.T.Fatal(err)
}
yamlB = append(yamlB, clusterConfigB)
for _, o := range childObjs {
// This is required because Flux requires a namespace be specified for objects
		// to reconcile correctly.
if o.GetNamespace() == "" {
o.SetNamespace("default")
}
objB, err := yaml.Marshal(o)
if err != nil {
e.T.Fatalf("Failed marshalling %s config: %v", o.GetName(), err)
}
yamlB = append(yamlB, objB)
}
return templater.AppendYamlResources(yamlB...)
}
func (e *ClusterE2ETest) buildClusterConfigFile() {
yaml := e.generateClusterConfigYaml()
writer, err := filewriter.NewWriter(e.ClusterConfigFolder)
if err != nil {
e.T.Fatalf("Error creating writer: %v", err)
}
writtenFile, err := writer.Write(filepath.Base(e.ClusterConfigLocation), yaml, filewriter.PersistentFile)
if err != nil {
e.T.Fatalf("Error writing cluster config to file %s: %v", e.ClusterConfigLocation, err)
}
e.T.Logf("Written cluster config to %v", writtenFile)
e.ClusterConfigLocation = writtenFile
}
func (e *ClusterE2ETest) DeleteCluster(opts ...CommandOpt) {
e.deleteCluster(opts...)
}
// CleanupVms is a helper to clean up VMs. It is a noop if the T_CLEANUP_VMS environment variable
// is false or unset.
func (e *ClusterE2ETest) CleanupVms() {
if !shouldCleanUpVms() {
e.T.Logf("Skipping VM cleanup")
return
}
if err := e.Provider.CleanupVMs(e.ClusterName); err != nil {
e.T.Logf("failed to clean up VMs: %v", err)
}
}
func (e *ClusterE2ETest) CleanupDockerEnvironment() {
e.T.Logf("cleanup kind enviornment...")
e.Run("kind", "delete", "clusters", "--all", "||", "true")
e.T.Logf("cleanup docker enviornment...")
e.Run("docker", "rm", "-vf", "$(docker ps -a -q)", "||", "true")
}
func shouldCleanUpVms() bool {
shouldCleanupVms, err := getCleanupVmsVar()
return err == nil && shouldCleanupVms
}
func (e *ClusterE2ETest) deleteCluster(opts ...CommandOpt) {
deleteClusterArgs := []string{"delete", "cluster", e.ClusterName, "-v", "4"}
if getBundlesOverride() == "true" {
deleteClusterArgs = append(deleteClusterArgs, "--bundles-override", defaultBundleReleaseManifestFile)
}
e.RunEKSA(deleteClusterArgs, opts...)
}
// GenerateSupportBundleOnCleanupIfTestFailed does what it says on the tin.
//
// It uses testing.T.Cleanup to register a handler that checks if the test
// failed, and generates a support bundle only in the event of a failure.
func (e *ClusterE2ETest) GenerateSupportBundleOnCleanupIfTestFailed(opts ...CommandOpt) {
e.T.Cleanup(func() {
if e.T.Failed() {
e.T.Log("Generating support bundle for failed test")
generateSupportBundleArgs := []string{"generate", "support-bundle", "-f", e.ClusterConfigLocation}
e.RunEKSA(generateSupportBundleArgs, opts...)
}
})
}
func (e *ClusterE2ETest) Run(name string, args ...string) {
command := strings.Join(append([]string{name}, args...), " ")
shArgs := []string{"-c", command}
e.T.Log("Running shell command", "[", command, "]")
cmd := exec.CommandContext(context.Background(), "sh", shArgs...)
envPath := os.Getenv("PATH")
binDir, err := DefaultLocalEKSABinDir()
if err != nil {
e.T.Fatalf("Error finding current directory: %v", err)
}
var stdoutAndErr bytes.Buffer
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s:%s", binDir, envPath))
cmd.Stderr = io.MultiWriter(os.Stderr, &stdoutAndErr)
cmd.Stdout = io.MultiWriter(os.Stdout, &stdoutAndErr)
if err = cmd.Run(); err != nil {
e.T.Log("Command failed, scanning output for error")
scanner := bufio.NewScanner(&stdoutAndErr)
var errorMessage string
		// Look for the last line of the output that starts with 'Error:'
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "Error:") {
errorMessage = line
}
}
if err := scanner.Err(); err != nil {
e.T.Fatalf("Failed reading command output looking for error message: %v", err)
}
if errorMessage != "" {
if e.ExpectFailure {
e.T.Logf("This error was expected. Continuing...")
return
}
e.T.Fatalf("Command %s %v failed with error: %v: %s", name, args, err, errorMessage)
}
e.T.Fatalf("Error running command %s %v: %v", name, args, err)
}
}
func (e *ClusterE2ETest) RunEKSA(args []string, opts ...CommandOpt) {
binaryPath := e.eksaBinaryLocation
for _, o := range opts {
err := o(&binaryPath, &args)
if err != nil {
e.T.Fatalf("Error executing EKS-A at path %s with args %s: %v", binaryPath, args, err)
}
}
e.Run(binaryPath, args...)
}
func (e *ClusterE2ETest) StopIfFailed() {
if e.T.Failed() {
e.T.FailNow()
}
}
func (e *ClusterE2ETest) cleanup(f func()) {
e.T.Cleanup(func() {
if !e.T.Failed() {
f()
}
})
}
// Cluster builds a cluster object using the ClusterE2ETest name and kubeconfig.
func (e *ClusterE2ETest) Cluster() *types.Cluster {
return &types.Cluster{
Name: e.ClusterName,
KubeconfigFile: e.KubeconfigFilePath(),
}
}
func (e *ClusterE2ETest) managementCluster() *types.Cluster {
return &types.Cluster{
Name: e.ClusterConfig.Cluster.ManagedBy(),
KubeconfigFile: e.managementKubeconfigFilePath(),
}
}
// KubeconfigFilePath retrieves the Kubeconfig path used for the workload cluster.
func (e *ClusterE2ETest) KubeconfigFilePath() string {
return filepath.Join(e.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", e.ClusterName))
}
// BuildWorkloadClusterClient creates a client for the workload cluster created by e.
func (e *ClusterE2ETest) BuildWorkloadClusterClient() (client.Client, error) {
var clusterClient client.Client
// Adding the retry logic here because the connection to the client does not always
	// succeed on the first try due to connection failure after the kubeconfig becomes
// available in the cluster.
err := retrier.Retry(12, 5*time.Second, func() error {
c, err := kubernetes.NewRuntimeClientFromFileName(e.KubeconfigFilePath())
if err != nil {
return fmt.Errorf("failed to build cluster client: %v", err)
}
clusterClient = c
return nil
})
return clusterClient, err
}
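// A sketch of using the returned controller-runtime client to read objects
// from the workload cluster:
//
//	c, err := e.BuildWorkloadClusterClient()
//	if err != nil {
//		e.T.Fatal(err)
//	}
//	nodes := &corev1.NodeList{}
//	if err := c.List(context.Background(), nodes); err != nil {
//		e.T.Fatal(err)
//	}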
func (e *ClusterE2ETest) managementKubeconfigFilePath() string {
clusterConfig := e.ClusterConfig.Cluster
if clusterConfig.IsSelfManaged() {
return e.KubeconfigFilePath()
}
managementClusterName := e.ClusterConfig.Cluster.ManagedBy()
return filepath.Join(managementClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", managementClusterName))
}
func (e *ClusterE2ETest) GetEksaVSphereMachineConfigs() []v1alpha1.VSphereMachineConfig {
clusterConfig := e.ClusterConfig.Cluster
machineConfigNames := make([]string, 0, len(clusterConfig.Spec.WorkerNodeGroupConfigurations)+1)
machineConfigNames = append(machineConfigNames, clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name)
for _, workerNodeConf := range clusterConfig.Spec.WorkerNodeGroupConfigurations {
machineConfigNames = append(machineConfigNames, workerNodeConf.MachineGroupRef.Name)
}
kubeconfig := e.KubeconfigFilePath()
ctx := context.Background()
machineConfigs := make([]v1alpha1.VSphereMachineConfig, 0, len(machineConfigNames))
for _, name := range machineConfigNames {
m, err := e.KubectlClient.GetEksaVSphereMachineConfig(ctx, name, kubeconfig, clusterConfig.Namespace)
if err != nil {
e.T.Fatalf("Failed getting VSphereMachineConfig: %v", err)
}
machineConfigs = append(machineConfigs, *m)
}
return machineConfigs
}
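// GetTestNameHash returns the first 7 hex characters of the SHA-1 of name,
// used to derive short cluster names that stay unique per test.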
func GetTestNameHash(name string) string {
h := sha1.New()
h.Write([]byte(name))
testNameHash := fmt.Sprintf("%x", h.Sum(nil))
return testNameHash[:7]
}
func getClusterName(t T) string {
value := os.Getenv(ClusterPrefixVar)
	// Append a hash to make each cluster name unique per test. Using the full test name would be
	// too long and would fail validations
if len(value) == 0 {
value = defaultClusterName
}
return fmt.Sprintf("%s-%s", value, GetTestNameHash(t.Name()))
}
func getBundlesOverride() string {
return os.Getenv(BundlesOverrideVar)
}
func getCleanupVmsVar() (bool, error) {
return strconv.ParseBool(os.Getenv(CleanupVmsVar))
}
func setEksctlVersionEnvVar() error {
eksctlVersionEnv := os.Getenv(eksctlVersionEnvVar)
if eksctlVersionEnv == "" {
err := os.Setenv(eksctlVersionEnvVar, eksctlVersionEnvVarDummyVal)
if err != nil {
return fmt.Errorf(
"couldn't set eksctl version env var %s to value %s",
eksctlVersionEnvVar,
eksctlVersionEnvVarDummyVal,
)
}
}
return nil
}
func (e *ClusterE2ETest) InstallHelmChart() {
kubeconfig := e.KubeconfigFilePath()
ctx := context.Background()
err := e.HelmInstallConfig.HelmClient.InstallChart(ctx, e.HelmInstallConfig.chartName, e.HelmInstallConfig.chartURI, e.HelmInstallConfig.chartVersion, kubeconfig, "", "", false, e.HelmInstallConfig.chartValues)
if err != nil {
e.T.Fatalf("Error installing %s helm chart on the cluster: %v", e.HelmInstallConfig.chartName, err)
}
}
// CreateNamespace creates a namespace.
func (e *ClusterE2ETest) CreateNamespace(namespace string) {
kubeconfig := e.KubeconfigFilePath()
err := e.KubectlClient.CreateNamespace(context.Background(), kubeconfig, namespace)
if err != nil {
e.T.Fatalf("Namespace creation failed for %s", namespace)
}
}
// DeleteNamespace deletes a namespace.
func (e *ClusterE2ETest) DeleteNamespace(namespace string) {
kubeconfig := e.KubeconfigFilePath()
err := e.KubectlClient.DeleteNamespace(context.Background(), kubeconfig, namespace)
if err != nil {
e.T.Fatalf("Namespace deletion failed for %s", namespace)
}
}
// SetPackageBundleActive will set the current packagebundle to the active state.
func (e *ClusterE2ETest) SetPackageBundleActive() {
kubeconfig := e.KubeconfigFilePath()
pbc, err := e.KubectlClient.GetPackageBundleController(context.Background(), kubeconfig, e.ClusterName)
if err != nil {
e.T.Fatalf("Error getting PackageBundleController: %v", err)
}
pb, err := e.KubectlClient.GetPackageBundleList(context.Background(), e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("Error getting PackageBundle: %v", err)
}
os.Setenv("KUBECONFIG", kubeconfig)
if pbc.Spec.ActiveBundle != pb[0].ObjectMeta.Name {
e.RunEKSA([]string{
"upgrade", "packages",
"--bundle-version", pb[0].ObjectMeta.Name, "-v=9",
"--cluster=" + e.ClusterName,
})
}
}
// ValidatingNoPackageController makes sure there is no package controller installed.
func (e *ClusterE2ETest) ValidatingNoPackageController() {
kubeconfig := e.KubeconfigFilePath()
_, err := e.KubectlClient.GetPackageBundleController(context.Background(), kubeconfig, e.ClusterName)
	if err == nil {
		e.T.Fatalf("found unexpected PackageBundleController for cluster %s", e.ClusterName)
	}
}
// InstallCuratedPackage will install a curated package.
func (e *ClusterE2ETest) InstallCuratedPackage(packageName, packagePrefix, kubeconfig string, opts ...string) {
os.Setenv("CURATED_PACKAGES_SUPPORT", "true")
// The package install command doesn't (yet?) have a --kubeconfig flag.
os.Setenv("KUBECONFIG", kubeconfig)
e.RunEKSA([]string{
"install", "package", packageName,
"--package-name=" + packagePrefix, "-v=9",
"--cluster=" + e.ClusterName,
strings.Join(opts, " "),
})
}
// InstallCuratedPackageFile will install a curated package from a yaml file, this is useful since target namespace isn't supported on the CLI.
func (e *ClusterE2ETest) InstallCuratedPackageFile(packageFile, kubeconfig string, opts ...string) {
os.Setenv("CURATED_PACKAGES_SUPPORT", "true")
os.Setenv("KUBECONFIG", kubeconfig)
e.T.Log("Installing EKS-A Packages file", packageFile)
e.RunEKSA([]string{
"apply", "package", "-f", packageFile, "-v=9", strings.Join(opts, " "),
})
}
func (e *ClusterE2ETest) generatePackageConfig(ns, targetns, prefix, packageName string) []byte {
yamlB := make([][]byte, 0, 4)
generatedName := fmt.Sprintf("%s-%s", prefix, packageName)
if targetns == "" {
targetns = ns
}
ns = fmt.Sprintf("%s-%s", ns, e.ClusterName)
builtpackage := &packagesv1.Package{
TypeMeta: metav1.TypeMeta{
Kind: packagesv1.PackageKind,
APIVersion: "packages.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: generatedName,
Namespace: ns,
},
Spec: packagesv1.PackageSpec{
PackageName: packageName,
TargetNamespace: targetns,
},
}
builtpackageB, err := yaml.Marshal(builtpackage)
if err != nil {
e.T.Fatalf("marshalling package config file: %v", err)
}
yamlB = append(yamlB, builtpackageB)
return templater.AppendYamlResources(yamlB...)
}
// BuildPackageConfigFile will create the file in the test directory for the curated package.
func (e *ClusterE2ETest) BuildPackageConfigFile(packageName, prefix, ns string) string {
b := e.generatePackageConfig(ns, ns, prefix, packageName)
writer, err := filewriter.NewWriter(e.ClusterConfigFolder)
if err != nil {
e.T.Fatalf("Error creating writer: %v", err)
}
packageFile := fmt.Sprintf("%s.yaml", packageName)
writtenFile, err := writer.Write(packageFile, b, filewriter.PersistentFile)
if err != nil {
e.T.Fatalf("Error writing cluster config to file %s: %v", e.ClusterConfigLocation, err)
}
return writtenFile
}
func (e *ClusterE2ETest) CreateResource(ctx context.Context, resource string) {
err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), []byte(resource))
if err != nil {
e.T.Fatalf("Failed to create required resource (%s): %v", resource, err)
}
}
func (e *ClusterE2ETest) UninstallCuratedPackage(packagePrefix string, opts ...string) {
e.RunEKSA([]string{
"delete", "package", packagePrefix, "-v=9",
"--cluster=" + e.ClusterName,
strings.Join(opts, " "),
})
}
func (e *ClusterE2ETest) InstallLocalStorageProvisioner() {
ctx := context.Background()
_, err := e.KubectlClient.ExecuteCommand(ctx, "apply", "-f",
"https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml",
"--kubeconfig", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("Error installing local-path-provisioner: %v", err)
}
}
// WithCluster helps with bringing up and tearing down E2E test clusters.
func (e *ClusterE2ETest) WithCluster(f func(e *ClusterE2ETest)) {
e.GenerateClusterConfig()
e.CreateCluster()
defer e.DeleteCluster()
f(e)
}
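// A hypothetical sketch:
//
//	test.WithCluster(func(e *ClusterE2ETest) {
//		e.ValidateCluster(e.ClusterConfig.Cluster.Spec.KubernetesVersion)
//	})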
// WithPersistentCluster is like WithCluster but does not delete the cluster. Useful for debugging.
func (e *ClusterE2ETest) WithPersistentCluster(f func(e *ClusterE2ETest)) {
configPath := e.KubeconfigFilePath()
if _, err := os.Stat(configPath); os.IsNotExist(err) {
e.GenerateClusterConfig()
e.CreateCluster()
}
f(e)
}
// VerifyHarborPackageInstalled is checking if the harbor package gets installed correctly.
func (e *ClusterE2ETest) VerifyHarborPackageInstalled(prefix string, namespace string) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
deployments := []string{"core", "jobservice", "nginx", "portal", "registry"}
statefulsets := []string{"database", "redis", "trivy"}
var wg sync.WaitGroup
wg.Add(len(deployments) + len(statefulsets))
errCh := make(chan error, 1)
okCh := make(chan string, 1)
time.Sleep(5 * time.Minute)
// Log Package/Deployment outputs
defer func() {
e.printDeploymentSpec(ctx, namespace)
}()
for _, name := range deployments {
go func(name string) {
defer wg.Done()
err := e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", fmt.Sprintf("%s-harbor-%s", prefix, name), namespace)
if err != nil {
errCh <- err
}
}(name)
}
for _, name := range statefulsets {
go func(name string) {
defer wg.Done()
err := e.KubectlClient.Wait(ctx, e.KubeconfigFilePath(), "20m", "Ready",
fmt.Sprintf("pods/%s-harbor-%s-0", prefix, name), namespace)
if err != nil {
errCh <- err
}
}(name)
}
go func() {
wg.Wait()
okCh <- "completed"
}()
select {
case err := <-errCh:
e.T.Fatal(err)
case <-okCh:
return
}
}
func (e *ClusterE2ETest) printPackageSpec(ctx context.Context, params []string) {
bytes, _ := e.KubectlClient.Execute(ctx, params...)
response := &packagesv1.Package{}
_ = json.Unmarshal(bytes.Bytes(), response)
formatted, _ := json.MarshalIndent(response, "", " ")
fmt.Println(string(formatted))
}
func (e *ClusterE2ETest) printDeploymentSpec(ctx context.Context, ns string) {
response, _ := e.KubectlClient.GetDeployments(ctx,
executables.WithKubeconfig(e.managementKubeconfigFilePath()),
executables.WithNamespace(ns),
)
formatted, _ := json.MarshalIndent(response, "", " ")
fmt.Println(string(formatted))
}
// VerifyHelloPackageInstalled is checking if the hello eks anywhere package gets installed correctly.
func (e *ClusterE2ETest) VerifyHelloPackageInstalled(packageName string, mgmtCluster *types.Cluster) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.GenerateSupportBundleOnCleanupIfTestFailed()
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, constants.EksaPackagesName)
}()
e.T.Log("Waiting for Package", packageName, "To be installed")
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
mgmtCluster, packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for hello-eks-anywhere package timed out: %s", err)
}
e.T.Log("Waiting for Package", packageName, "Deployment to be healthy")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", "hello-eks-anywhere", constants.EksaPackagesName)
if err != nil {
e.T.Fatalf("waiting for hello-eks-anywhere deployment timed out: %s", err)
}
svcAddress := packageName + "." + constants.EksaPackagesName + ".svc.cluster.local"
e.T.Log("Validate content at endpoint", svcAddress)
expectedLogs := "Amazon EKS Anywhere"
e.ValidateEndpointContent(svcAddress, constants.EksaPackagesName, expectedLogs)
}
// VerifyAdotPackageInstalled is checking if the ADOT package gets installed correctly.
func (e *ClusterE2ETest) VerifyAdotPackageInstalled(packageName string, targetNamespace string) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.GenerateSupportBundleOnCleanupIfTestFailed()
e.T.Log("Waiting for package", packageName, "to be installed")
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
e.Cluster(), packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for adot package install timed out: %s", err)
}
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, targetNamespace)
}()
e.T.Log("Waiting for package", packageName, "deployment to be available")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", fmt.Sprintf("%s-aws-otel-collector", packageName), targetNamespace)
if err != nil {
e.T.Fatalf("waiting for adot deployment timed out: %s", err)
}
e.T.Log("Reading", packageName, "pod logs")
adotPodName, err := e.KubectlClient.GetPodNameByLabel(context.TODO(), targetNamespace, "app.kubernetes.io/name=aws-otel-collector", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("unable to get name of the aws-otel-collector pod: %s", err)
}
expectedLogs := "Everything is ready"
e.MatchLogs(targetNamespace, adotPodName, "aws-otel-collector", expectedLogs, 5*time.Minute)
podIPAddress, err := e.KubectlClient.GetPodIP(context.TODO(), targetNamespace, adotPodName, e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("unable to get ip of the aws-otel-collector pod: %s", err)
}
podFullIPAddress := strings.Trim(podIPAddress, `'"`) + ":8888/metrics"
e.T.Log("Validate content at endpoint", podFullIPAddress)
expectedLogs = "otelcol_exporter"
e.ValidateEndpointContent(podFullIPAddress, targetNamespace, expectedLogs)
}
//go:embed testdata/adot_package_deployment.yaml
var adotPackageDeployment []byte
//go:embed testdata/adot_package_daemonset.yaml
var adotPackageDaemonset []byte
// VerifyAdotPackageDeploymentUpdated is checking if deployment config changes trigger resource reloads correctly.
func (e *ClusterE2ETest) VerifyAdotPackageDeploymentUpdated(packageName string, targetNamespace string) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
// Deploy ADOT as a deployment and scrape the apiservers
e.T.Log("Apply changes to package", packageName)
e.T.Log("This will update", packageName, "to be a deployment, and scrape the apiservers")
err := e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), adotPackageDeployment, packageMetadatNamespace)
if err != nil {
e.T.Fatalf("Error upgrading adot package: %s", err)
return
}
time.Sleep(30 * time.Second) // Add sleep to allow package to change state
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, targetNamespace)
}()
e.T.Log("Waiting for package", packageName, "to be updated")
err = e.KubectlClient.WaitForPackagesInstalled(ctx,
e.Cluster(), packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for adot package update timed out: %s", err)
}
e.T.Log("Waiting for package", packageName, "deployment to be available")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", fmt.Sprintf("%s-aws-otel-collector", packageName), targetNamespace)
if err != nil {
e.T.Fatalf("waiting for adot deployment timed out: %s", err)
}
e.T.Log("Reading", packageName, "pod logs")
adotPodName, err := e.KubectlClient.GetPodNameByLabel(context.TODO(), targetNamespace, "app.kubernetes.io/name=aws-otel-collector", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("unable to get name of the aws-otel-collector pod: %s", err)
}
logs, err := e.KubectlClient.GetPodLogs(context.TODO(), targetNamespace, adotPodName, "aws-otel-collector", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("failure getting pod logs %s", err)
}
fmt.Printf("Logs from aws-otel-collector pod\n %s\n", logs)
expectedLogs := "Everything is ready"
ok := strings.Contains(logs, expectedLogs)
if !ok {
e.T.Fatalf("expected to find %s in the log, got %s", expectedLogs, logs)
}
}
// VerifyAdotPackageDaemonSetUpdated is checking if daemonset config changes trigger resource reloads correctly.
func (e *ClusterE2ETest) VerifyAdotPackageDaemonSetUpdated(packageName string, targetNamespace string) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
// Deploy ADOT as a daemonset and scrape the node
e.T.Log("Apply changes to package", packageName)
e.T.Log("This will update", packageName, "to be a daemonset, and scrape the node")
err := e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), adotPackageDaemonset, packageMetadatNamespace)
if err != nil {
e.T.Fatalf("Error upgrading adot package: %s", err)
return
}
time.Sleep(30 * time.Second) // Add sleep to allow package to change state
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, targetNamespace)
}()
e.T.Log("Waiting for package", packageName, "to be updated")
err = e.KubectlClient.WaitForPackagesInstalled(ctx,
e.Cluster(), packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for adot package update timed out: %s", err)
}
e.T.Log("Waiting for package", packageName, "daemonset to be rolled out")
err = retrier.New(6 * time.Minute).Retry(func() error {
return e.KubectlClient.WaitForResourceRolledout(ctx,
e.Cluster(), "20m", fmt.Sprintf("%s-aws-otel-collector-agent", packageName), targetNamespace, "daemonset")
})
if err != nil {
e.T.Fatalf("waiting for adot daemonset timed out: %s", err)
}
e.T.Log("Reading", packageName, "pod logs")
adotPodName, err := e.KubectlClient.GetPodNameByLabel(context.TODO(), targetNamespace, "app.kubernetes.io/name=aws-otel-collector", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("unable to get name of the aws-otel-collector pod: %s", err)
}
expectedLogs := "Everything is ready"
err = retrier.New(5 * time.Minute).Retry(func() error {
logs, err := e.KubectlClient.GetPodLogs(context.TODO(), targetNamespace, adotPodName, "aws-otel-collector", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("failure getting pod logs %s", err)
}
fmt.Printf("Logs from aws-otel-collector pod\n %s\n", logs)
ok := strings.Contains(logs, expectedLogs)
if !ok {
return fmt.Errorf("expected to find %s in the log, got %s", expectedLogs, logs)
}
return nil
})
if err != nil {
e.T.Fatalf("unable to finish log comparison: %s", err)
}
}
//go:embed testdata/emissary_listener.yaml
var emissaryListener []byte
//go:embed testdata/emissary_package.yaml
var emissaryPackage []byte
// VerifyEmissaryPackageInstalled is checking if emissary package gets installed correctly.
func (e *ClusterE2ETest) VerifyEmissaryPackageInstalled(packageName string, mgmtCluster *types.Cluster) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.T.Log("Waiting for Package", packageName, "To be installed")
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
mgmtCluster, packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for emissary package timed out: %s", err)
}
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, constants.EksaPackagesName)
}()
e.T.Log("Waiting for Package", packageName, "Deployment to be healthy")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", packageName, constants.EksaPackagesName)
if err != nil {
e.T.Fatalf("waiting for emissary deployment timed out: %s", err)
}
svcAddress := packageName + "-admin." + constants.EksaPackagesName + ".svc.cluster.local" + ":8877/ambassador/v0/check_alive"
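// For a package named "my-emissary", this resolves to an in-cluster URL like
// my-emissary-admin.eksa-packages.svc.cluster.local:8877/ambassador/v0/check_alive
// (assuming constants.EksaPackagesName is "eksa-packages").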
e.T.Log("Validate content at endpoint", svcAddress)
expectedLogs := "Ambassador is alive and well"
e.ValidateEndpointContent(svcAddress, constants.EksaPackagesName, expectedLogs)
}
// TestEmissaryPackageRouting is checking if emissary is able to create Ingress, host, and mapping that function correctly.
func (e *ClusterE2ETest) TestEmissaryPackageRouting(packageName string, mgmtCluster *types.Cluster) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), emissaryPackage)
if err != nil {
e.T.Errorf("Error upgrading emissary package: %v", err)
return
}
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, constants.EksaPackagesName)
}()
e.T.Log("Waiting for Package", packageName, "To be upgraded")
err = e.KubectlClient.WaitForPackagesInstalled(ctx,
mgmtCluster, packageName, "20m", fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName))
if err != nil {
e.T.Fatalf("waiting for emissary package upgrade timed out: %s", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), emissaryListener)
if err != nil {
e.T.Errorf("Error applying roles for oids: %v", err)
return
}
// Functional testing of Emissary Ingress
ingresssvcAddress := packageName + "." + constants.EksaPackagesName + ".svc.cluster.local" + "/backend/"
e.T.Log("Validate content at endpoint", ingresssvcAddress)
expectedLogs := "quote"
e.ValidateEndpointContent(ingresssvcAddress, constants.EksaPackagesName, expectedLogs)
}
// VerifyPrometheusPackageInstalled is checking if the Prometheus package gets installed correctly.
func (e *ClusterE2ETest) VerifyPrometheusPackageInstalled(packageName string, targetNamespace string) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.T.Log("Waiting for package", packageName, "to be installed")
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
e.Cluster(), packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for prometheus package install timed out: %s", err)
}
}
// VerifyCertManagerPackageInstalled is checking if the cert manager package gets installed correctly.
func (e *ClusterE2ETest) VerifyCertManagerPackageInstalled(prefix string, namespace string, packageName string, mgmtCluster *types.Cluster) {
ctx, cancel := context.WithCancel(context.Background())
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
defer cancel()
deployments := []string{"cert-manager", "cert-manager-cainjector", "cert-manager-webhook"}
var wg sync.WaitGroup
errCh := make(chan error, 1)
okCh := make(chan string, 1)
e.T.Log("Waiting for Package", packageName, "To be installed")
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, namespace)
}()
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
mgmtCluster, prefix+"-"+packageName, "5m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for cert-manager package timed out: %s", err)
}
e.T.Log("Waiting for Package", packageName, "Deployment to be healthy")
for _, name := range deployments {
wg.Add(1)
go func(name string) {
defer wg.Done()
err := e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", fmt.Sprintf("%s-%s", prefix, name), namespace)
if err != nil {
errCh <- err
}
}(name)
}
e.T.Log("Waiting for Self Signed certificate to be issued")
err = e.verifySelfSignedCertificate(mgmtCluster)
if err != nil {
errCh <- err
}
e.T.Log("Waiting for Let's Encrypt certificate to be issued")
err = e.verifyLetsEncryptCert(mgmtCluster)
if err != nil {
errCh <- err
}
go func() {
wg.Wait()
okCh <- "completed"
}()
select {
case err := <-errCh:
e.T.Fatal(err)
case <-okCh:
return
}
}
//go:embed testdata/certmanager/certmanager_selfsignedissuer.yaml
var certManagerSelfSignedIssuer []byte
//go:embed testdata/certmanager/certmanager_selfsignedcert.yaml
var certManagerSelfSignedCert []byte
func (e *ClusterE2ETest) verifySelfSignedCertificate(mgmtCluster *types.Cluster) error {
ctx := context.Background()
selfsignedCert := "my-selfsigned-ca"
err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), certManagerSelfSignedIssuer)
if err != nil {
return fmt.Errorf("error installing Cluster issuer for cert manager: %v", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), certManagerSelfSignedCert)
if err != nil {
return fmt.Errorf("error applying certificate for cert manager: %v", err)
}
err = e.KubectlClient.WaitJSONPathLoop(ctx, e.Cluster().KubeconfigFile, "5m", "status.conditions[?(@.type=='Ready')].status", "True",
fmt.Sprintf("certificates.cert-manager.io/%s", selfsignedCert), constants.EksaPackagesName)
if err != nil {
return fmt.Errorf("failed to issue a self signed certificate: %v", err)
}
return nil
}
//go:embed testdata/certmanager/certmanager_letsencrypt_issuer.yaml
var certManagerLetsEncryptIssuer string
//go:embed testdata/certmanager/certmanager_letsencrypt_cert.yaml
var certManagerLetsEncryptCert []byte
//go:embed testdata/certmanager/certmanager_secret.yaml
var certManagerSecret string
func (e *ClusterE2ETest) verifyLetsEncryptCert(mgmtCluster *types.Cluster) error {
ctx := context.Background()
letsEncryptCert := "test-cert"
accessKey, secretAccess, region, zoneID := GetRoute53Configs()
data := map[string]interface{}{
"route53SecretAccessKey": secretAccess,
}
certManagerSecretData, err := templater.Execute(certManagerSecret, data)
if err != nil {
return fmt.Errorf("failed creating cert manager secret: %v", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), certManagerSecretData)
if err != nil {
return fmt.Errorf("error creating cert manager secret: %v", err)
}
data = map[string]interface{}{
"route53AccessKeyId": accessKey,
"route53ZoneId": zoneID,
"route53Region": region,
}
certManagerIssuerData, err := templater.Execute(certManagerLetsEncryptIssuer, data)
if err != nil {
return fmt.Errorf("failed creating lets encrypt issuer: %v", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), certManagerIssuerData)
if err != nil {
return fmt.Errorf("error creating cert manager let's encrypt issuer: %v", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), certManagerLetsEncryptCert)
if err != nil {
return fmt.Errorf("error creating cert manager let's encrypt issuer: %v", err)
}
err = e.KubectlClient.WaitJSONPathLoop(ctx, e.Cluster().KubeconfigFile, "5m", "status.conditions[?(@.type=='Ready')].status", "True",
fmt.Sprintf("certificates.cert-manager.io/%s", letsEncryptCert), constants.EksaPackagesName)
if err != nil {
return fmt.Errorf("failed to issue a self signed certificate: %v", err)
}
return nil
}
// VerifyPrometheusPrometheusServerStates is checking if the Prometheus package prometheus-server component is functioning properly.
func (e *ClusterE2ETest) VerifyPrometheusPrometheusServerStates(packageName string, targetNamespace string, mode string) {
ctx := context.Background()
e.T.Log("Waiting for package", packageName, mode, "prometheus-server to be rolled out")
err := retrier.New(6 * time.Minute).Retry(func() error {
return e.KubectlClient.WaitForResourceRolledout(ctx,
e.Cluster(), "5m", fmt.Sprintf("%s-server", packageName), targetNamespace, mode)
})
if err != nil {
e.T.Fatalf("waiting for prometheus-server %s timed out: %s", mode, err)
}
e.T.Log("Reading package", packageName, "pod prometheus-server logs")
podName, err := e.KubectlClient.GetPodNameByLabel(context.TODO(), targetNamespace, "app=prometheus,component=server", e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("unable to get name of the prometheus-server pod: %s", err)
}
expectedLogs := "Server is ready to receive web requests"
e.MatchLogs(targetNamespace, podName, "prometheus-server", expectedLogs, 5*time.Minute)
}
// VerifyPrometheusNodeExporterStates is checking if the Prometheus package node-exporter component is functioning properly.
func (e *ClusterE2ETest) VerifyPrometheusNodeExporterStates(packageName string, targetNamespace string) {
ctx := context.Background()
e.T.Log("Waiting for package", packageName, "daemonset node-exporter to be rolled out")
err := retrier.New(6 * time.Minute).Retry(func() error {
return e.KubectlClient.WaitForResourceRolledout(ctx,
e.Cluster(), "5m", fmt.Sprintf("%s-node-exporter", packageName), targetNamespace, "daemonset")
})
if err != nil {
e.T.Fatalf("waiting for prometheus daemonset timed out: %s", err)
}
svcAddress := packageName + "-node-exporter." + targetNamespace + ".svc.cluster.local" + ":9100/metrics"
e.T.Log("Validate content at endpoint", svcAddress)
expectedLogs := "HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles"
e.ValidateEndpointContent(svcAddress, targetNamespace, expectedLogs)
}
//go:embed testdata/prometheus_package_deployment.yaml
var prometheusPackageDeployment []byte
//go:embed testdata/prometheus_package_statefulset.yaml
var prometheusPackageStatefulSet []byte
// ApplyPrometheusPackageServerDeploymentFile is checking if deployment config changes trigger resource reloads correctly.
func (e *ClusterE2ETest) ApplyPrometheusPackageServerDeploymentFile(packageName string, targetNamespace string) {
e.T.Log("Update", packageName, "to be a deployment, and scrape the api-servers")
e.ApplyPackageFile(packageName, targetNamespace, prometheusPackageDeployment)
}
// ApplyPrometheusPackageServerStatefulSetFile is checking if statefulset config changes trigger resource reloads correctly.
func (e *ClusterE2ETest) ApplyPrometheusPackageServerStatefulSetFile(packageName string, targetNamespace string) {
e.T.Log("Update", packageName, "to be a statefulset, and scrape the api-servers")
e.ApplyPackageFile(packageName, targetNamespace, prometheusPackageStatefulSet)
}
// VerifyPackageControllerNotInstalled is verifying that package controller is not installed.
func (e *ClusterE2ETest) VerifyPackageControllerNotInstalled() {
ctx := context.Background()
packageDeployment := "eks-anywhere-packages"
_, err := e.KubectlClient.GetDeployment(ctx, packageDeployment, constants.EksaPackagesName, e.Cluster().KubeconfigFile)
if !apierrors.IsNotFound(err) {
e.T.Fatalf("found deployment for package controller in workload cluster %s : %s", e.ClusterName, err)
}
}
// VerifyAutoScalerPackageInstalled is verifying that the autoscaler package is installed and deployed.
func (e *ClusterE2ETest) VerifyAutoScalerPackageInstalled(packageName string, targetNamespace string, mgmtCluster *types.Cluster) {
ctx := context.Background()
deploymentName := "cluster-autoscaler-clusterapi-cluster-autoscaler"
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.T.Log("Waiting for Package", packageName, "To be installed")
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, targetNamespace)
}()
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
mgmtCluster, packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for Autoscaler Package to be avaliable")
}
e.T.Log("Waiting for Package", packageName, "Deployment to be healthy")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", deploymentName, targetNamespace)
if err != nil {
e.T.Fatalf("waiting for cluster-autoscaler deployment timed out: %s", err)
}
}
// VerifyMetricServerPackageInstalled is verifying that metrics-server is installed and deployed.
func (e *ClusterE2ETest) VerifyMetricServerPackageInstalled(packageName string, targetNamespace string, mgmtCluster *types.Cluster) {
ctx := context.Background()
deploymentName := "metrics-server"
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.T.Log("Waiting for Package", packageName, "To be installed")
// Log Package/Deployment outputs
defer func() {
params := []string{"get", "package", packageName, "-o", "json", "-n", packageMetadatNamespace, "--kubeconfig", e.KubeconfigFilePath()}
e.printPackageSpec(ctx, params)
e.printDeploymentSpec(ctx, targetNamespace)
}()
err := e.KubectlClient.WaitForPackagesInstalled(ctx,
mgmtCluster, packageName, "20m", packageMetadatNamespace)
if err != nil {
e.T.Fatalf("waiting for Metric Server Package to be avaliable")
}
e.T.Log("Waiting for Package", packageName, "Deployment to be healthy")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "20m", "Available", deploymentName, targetNamespace)
if err != nil {
e.T.Fatalf("waiting for Metric Server deployment timed out: %s", err)
}
}
//go:embed testdata/autoscaler_package.yaml
var autoscalerPackageDeploymentTemplate string
//go:embed testdata/metrics_server_package.yaml
var metricsServerPackageDeploymentTemplate string
// InstallAutoScalerWithMetricServer installs autoscaler and metrics-server with a given target namespace.
func (e *ClusterE2ETest) InstallAutoScalerWithMetricServer(targetNamespace string) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
data := map[string]interface{}{
"targetNamespace": targetNamespace,
"clusterName": e.Cluster().Name,
}
metricsServerPackageDeployment, err := templater.Execute(metricsServerPackageDeploymentTemplate, data)
if err != nil {
e.T.Fatalf("Failed creating metrics-erver Package Deployment: %s", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), metricsServerPackageDeployment,
packageMetadatNamespace)
if err != nil {
e.T.Fatalf("Error installing metrics-sserver pacakge: %s", err)
}
autoscalerPackageDeployment, err := templater.Execute(autoscalerPackageDeploymentTemplate, data)
if err != nil {
e.T.Fatalf("Failed creating autoscaler Package Deployment: %s", err)
}
err = e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), autoscalerPackageDeployment,
packageMetadatNamespace)
if err != nil {
e.T.Fatalf("Error installing cluster autoscaler pacakge: %s", err)
}
}
// CombinedAutoScalerMetricServerTest verifies that new nodes are spun up after using a HPA to scale a deployment.
func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName string, metricServerName string, targetNamespace string, mgmtCluster *types.Cluster) {
ctx := context.Background()
ns := "default"
name := "hpa-busybox-test"
machineDeploymentName := e.ClusterName + "-" + "md-0"
e.VerifyMetricServerPackageInstalled(metricServerName, targetNamespace, mgmtCluster)
e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster)
e.T.Log("Metrics Server and Cluster Autoscaler ready")
err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, mgmtCluster, hpaBusybox)
if err != nil {
e.T.Fatalf("Failed to apply hpa busybox load %s", err)
}
e.T.Log("Deploying test workload")
err = e.KubectlClient.WaitForDeployment(ctx,
e.Cluster(), "5m", "Available", name, ns)
if err != nil {
e.T.Fatalf("Failed waiting for test workload deployent %s", err)
}
params := []string{"autoscale", "deployment", name, "--cpu-percent=50", "--min=1", "--max=20", "--kubeconfig", e.KubeconfigFilePath()}
_, err = e.KubectlClient.ExecuteCommand(ctx, params...)
if err != nil {
e.T.Fatalf("Failed to autoscale deployent: %s", err)
}
e.T.Log("Waiting for machinedeployment to begin scaling up")
err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "20m", "status.phase", "ScalingUp",
fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace)
if err != nil {
e.T.Fatalf("Failed to get ScalingUp phase for machinedeployment: %s", err)
}
e.T.Log("Waiting for machinedeployment to finish scaling up")
err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "15m", "status.phase", "Running",
fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace)
if err != nil {
e.T.Fatalf("Failed to get Running phase for machinedeployment: %s", err)
}
err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "2m",
machineDeploymentName)
if err != nil {
e.T.Fatalf("Machine deployment stuck in scaling up: %s", err)
}
e.T.Log("Finished scaling up machines")
}
// ValidateClusterState runs a set of validations against the cluster to identify an invalid cluster state.
func (e *ClusterE2ETest) ValidateClusterState() {
validateClusterState(e.T.(*testing.T), e)
}
// ValidateClusterStateWithT runs a set of validations against the cluster to identify an invalid cluster state and accepts *testing.T as a parameter.
func (e *ClusterE2ETest) ValidateClusterStateWithT(t *testing.T) {
validateClusterState(t, e)
}
func validateClusterState(t *testing.T, e *ClusterE2ETest) {
t.Logf("Validating cluster %s", e.ClusterName)
ctx := context.Background()
e.buildClusterStateValidationConfig(ctx)
clusterStateValidator := newClusterStateValidator(e.clusterStateValidationConfig)
clusterStateValidator.WithValidations(validationsForExpectedObjects()...)
clusterStateValidator.WithValidations(e.Provider.ClusterStateValidations()...)
if err := clusterStateValidator.Validate(ctx); err != nil {
e.T.Fatalf("failed to validate cluster %v", err)
}
}
// ApplyPackageFile is applying a package file in the cluster.
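// An illustrative call (the package name and namespace are examples only):
//
//	e.ApplyPackageFile("generated-prometheus", "observability", prometheusPackageDeployment)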
func (e *ClusterE2ETest) ApplyPackageFile(packageName string, targetNamespace string, packageFile []byte) {
ctx := context.Background()
packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName)
e.T.Log("Apply changes to package", packageName)
err := e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), packageFile, packageMetadatNamespace)
if err != nil {
e.T.Fatalf("Error upgrading package: %s", err)
return
}
time.Sleep(30 * time.Second) // Add sleep to allow package to change state
}
// CurlEndpointByBusyBox creates a busybox pod with command to curl the target endpoint,
// and returns the created busybox pod name.
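//
// Illustrative usage (endpoint and namespace are examples only):
//
//	podName := e.CurlEndpointByBusyBox("my-svc.default.svc.cluster.local:8080/healthz", "default")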
func (e *ClusterE2ETest) CurlEndpointByBusyBox(endpoint string, namespace string) string {
ctx := context.Background()
e.T.Log("Launching Busybox pod to curl endpoint", endpoint)
randomname := fmt.Sprintf("%s-%s", "busybox-test", utilrand.String(7))
busyBoxPodName, err := e.KubectlClient.RunBusyBoxPod(context.TODO(),
namespace, randomname, e.KubeconfigFilePath(), []string{"curl", endpoint})
if err != nil {
e.T.Fatalf("error launching busybox pod: %s", err)
}
err = e.KubectlClient.WaitForPodCompleted(ctx,
e.Cluster(), busyBoxPodName, "5m", namespace)
if err != nil {
e.T.Fatalf("waiting for busybox pod %s timed out: %s", busyBoxPodName, err)
}
return busyBoxPodName
}
// MatchLogs matches the log from a container to the expected content. Given it
// takes time for logs to be populated, a retrier with configurable timeout duration
// is added.
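//
// Illustrative usage (pod, container, and expected content are examples only):
//
//	e.MatchLogs("default", "my-pod", "my-container", "server started", 5*time.Minute)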
func (e *ClusterE2ETest) MatchLogs(targetNamespace string, targetPodName string,
targetContainerName string, expectedLogs string, timeout time.Duration,
) {
e.T.Logf("Match logs for pod %s, container %s in namespace %s", targetPodName,
targetContainerName, targetNamespace)
e.GenerateSupportBundleOnCleanupIfTestFailed()
err := retrier.New(timeout).Retry(func() error {
logs, err := e.KubectlClient.GetPodLogs(context.TODO(), targetNamespace,
targetPodName, targetContainerName, e.KubeconfigFilePath())
if err != nil {
return fmt.Errorf("failure getting pod logs %s", err)
}
fmt.Printf("Logs from pod\n %s\n", logs)
ok := strings.Contains(logs, expectedLogs)
if !ok {
return fmt.Errorf("expected to find %s in the log, got %s", expectedLogs, logs)
}
return nil
})
if err != nil {
e.T.Fatalf("unable to match logs: %s", err)
}
}
// ValidateEndpointContent validates the contents at the target endpoint.
func (e *ClusterE2ETest) ValidateEndpointContent(endpoint string, namespace string, expectedContent string) {
busyBoxPodName := e.CurlEndpointByBusyBox(endpoint, namespace)
e.MatchLogs(namespace, busyBoxPodName, busyBoxPodName, expectedContent, 5*time.Minute)
}
// AirgapDockerContainers airgap docker containers. Outside network should not be reached during airgapped deployment.
func (e *ClusterE2ETest) AirgapDockerContainers(localCIDRs string) {
e.T.Logf("Airgap docker containers...")
e.Run(fmt.Sprintf("sudo iptables -F DOCKER-USER && sudo iptables -I DOCKER-USER -j DROP && sudo iptables -I DOCKER-USER -s %s,172.0.0.0/8,127.0.0.1 -j ACCEPT", localCIDRs))
}
// CreateAirgappedUser create airgapped user and setup the iptables rule. Notice that OUTPUT chain is flushed each time.
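// After this runs, the OUTPUT chain looks roughly like:
//
//	-A OUTPUT -d <localCIDR>,172.0.0.0/8,127.0.0.1 -m owner --uid-owner airgap -j ACCEPT
//	-A OUTPUT -m owner --uid-owner airgap -j REJECT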
func (e *ClusterE2ETest) CreateAirgappedUser(localCIDR string) {
e.Run("if ! id airgap; then sudo useradd airgap -G docker; fi")
e.Run("mkdir ./eksa-cli-logs || chmod 777 ./eksa-cli-logs") // Allow the airgap user to access logs folder
e.Run("chmod -R 777 ./") // Allow the airgap user to access working dir
e.Run("sudo iptables -F OUTPUT")
e.Run(fmt.Sprintf("sudo iptables -A OUTPUT -d %s,172.0.0.0/8,127.0.0.1 -m owner --uid-owner airgap -j ACCEPT", localCIDR))
e.Run("sudo iptables -A OUTPUT -m owner --uid-owner airgap -j REJECT")
}
// AssertAirgappedNetwork make sure that the admin machine is indeed airgapped.
func (e *ClusterE2ETest) AssertAirgappedNetwork() {
cmd := exec.Command("docker", "run", "--rm", "busybox", "ping", "8.8.8.8", "-c", "1", "-W", "2")
out, err := cmd.Output()
e.T.Log(string(out))
if err == nil {
e.T.Fatalf("Docker container is not airgapped")
}
cmd = exec.Command("sudo", "-u", "airgap", "ping", "8.8.8.8", "-c", "1", "-W", "2")
out, err = cmd.Output()
e.T.Log(string(out))
if err == nil {
e.T.Fatalf("Airgap user is not airgapped")
}
}
func dumpFile(description string, path string, t T) {
b, err := os.ReadFile(path)
if err != nil {
t.Fatal(err)
}
t.Logf("%s:\n%s\n", description, string(b))
}
func (e *ClusterE2ETest) setFeatureFlagForUnreleasedKubernetesVersion(version v1alpha1.KubernetesVersion) {
// Update this variable to equal the feature flagged k8s version when applicable.
// For example, if k8s 1.26 is under a feature flag, we would set this to v1alpha1.Kube126
var unreleasedK8sVersion v1alpha1.KubernetesVersion
if version == unreleasedK8sVersion {
// Set feature flag for the unreleased k8s version when applicable
e.T.Logf("Setting k8s version support feature flag...")
}
}
// CreateCloudStackCredentialsSecretFromEnvVar parses the cloudstack credentials from an environment variable,
// builds a new secret object from the credentials in the provided profile and creates it in the cluster.
func (e *ClusterE2ETest) CreateCloudStackCredentialsSecretFromEnvVar(name string, profileName string) {
ctx := context.Background()
execConfig, err := decoder.ParseCloudStackCredsFromEnv()
if err != nil {
e.T.Fatalf("error parsing cloudstack credentials from env: %v", err)
return
}
var selectedProfile *decoder.CloudStackProfileConfig
for _, p := range execConfig.Profiles {
if profileName == p.Name {
selectedProfile = &p
break
}
}
if selectedProfile == nil {
e.T.Fatalf("error finding profile with the name %s", profileName)
return
}
data := map[string][]byte{}
data[decoder.APIKeyKey] = []byte(selectedProfile.ApiKey)
data[decoder.SecretKeyKey] = []byte(selectedProfile.SecretKey)
data[decoder.APIUrlKey] = []byte(selectedProfile.ManagementUrl)
data[decoder.VerifySslKey] = []byte(selectedProfile.VerifySsl)
// Create a new secret with the credentials from the profile, but with a new name.
secret := corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: data,
}
secretContent, err := yaml.Marshal(secret)
if err != nil {
e.T.Fatalf("error mashalling credentials secret : %v", err)
return
}
err = e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), secretContent,
constants.EksaSystemNamespace)
if err != nil {
e.T.Fatalf("error applying credentials secret to cluster %s: %v", e.Cluster().Name, err)
return
}
}
func (e *ClusterE2ETest) addClusterConfigFillers(fillers ...api.ClusterConfigFiller) {
e.clusterConfigFillers = append(e.clusterConfigFillers, fillers...)
}
| 2,225 |
eks-anywhere | aws | Go | package framework
import (
"context"
"fmt"
"time"
machinerytypes "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/retrier"
clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
"github.com/aws/eks-anywhere/test/framework/cluster/validations"
)
func validationsForExpectedObjects() []clusterf.StateValidation {
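// 120 retries at 5-second intervals allows up to ~10 minutes of waiting;
// 120 retries at 10-second intervals allows up to ~20 minutes.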
mediumRetrier := retrier.NewWithMaxRetries(120, 5*time.Second)
longRetrier := retrier.NewWithMaxRetries(120, 10*time.Second)
return []clusterf.StateValidation{
clusterf.RetriableStateValidation(mediumRetrier, validations.ValidateClusterReady),
clusterf.RetriableStateValidation(mediumRetrier, validations.ValidateEKSAObjects),
clusterf.RetriableStateValidation(longRetrier, validations.ValidateControlPlaneNodes),
clusterf.RetriableStateValidation(longRetrier, validations.ValidateWorkerNodes),
clusterf.RetriableStateValidation(mediumRetrier, validations.ValidateCilium),
}
}
func validationsForClusterDoesNotExist() []clusterf.StateValidation {
return []clusterf.StateValidation{
clusterf.RetriableStateValidation(retrier.NewWithMaxRetries(120, 5*time.Second), validations.ValidateClusterDoesNotExist),
}
}
func (e *ClusterE2ETest) buildClusterStateValidationConfig(ctx context.Context) {
managementClusterClient, err := buildClusterClient(e.managementKubeconfigFilePath())
if err != nil {
e.T.Fatalf("failed to create management cluster client: %s", err)
}
clusterClient := managementClusterClient
if e.managementKubeconfigFilePath() != e.KubeconfigFilePath() {
clusterClient, err = buildClusterClient(e.KubeconfigFilePath())
if err != nil {
e.T.Fatalf("failed to create cluster client: %s", err)
}
}
spec, err := buildClusterSpec(ctx, managementClusterClient, e.ClusterConfig)
if err != nil {
e.T.Fatalf("failed to build cluster spec with kubeconfig %s: %v", e.KubeconfigFilePath(), err)
}
e.clusterStateValidationConfig = &clusterf.StateValidationConfig{
ClusterClient: clusterClient,
ManagementClusterClient: managementClusterClient,
ClusterSpec: spec,
}
}
func newClusterStateValidator(config *clusterf.StateValidationConfig) *clusterf.StateValidator {
return clusterf.NewStateValidator(*config)
}
func buildClusterClient(kubeconfigFileName string) (client.Client, error) {
var clusterClient client.Client
// Adding the retry logic here because the connection to the client does not always
// succeed on the first try due to connection failure after the kubeconfig becomes
// available in the cluster.
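// With 12 attempts at 5-second intervals, this waits up to about a minute.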
err := retrier.Retry(12, 5*time.Second, func() error {
c, err := kubernetes.NewRuntimeClientFromFileName(kubeconfigFileName)
if err != nil {
return fmt.Errorf("failed to build cluster client: %v", err)
}
clusterClient = c
return nil
})
return clusterClient, err
}
func buildClusterSpec(ctx context.Context, client client.Client, config *cluster.Config) (*cluster.Spec, error) {
clusterConfig := config.DeepCopy()
// The cluster config built by the test does not have certain defaults like the bundle reference,
// so fetch that information from the cluster if missing. This is needed in order to build the cluster spec.
if clusterConfig.Cluster.Spec.BundlesRef == nil {
clus := &v1alpha1.Cluster{}
key := machinerytypes.NamespacedName{Namespace: clusterConfig.Cluster.Namespace, Name: clusterConfig.Cluster.Name}
if err := client.Get(ctx, key, clus); err != nil {
return nil, fmt.Errorf("failed to get cluster to build spec: %s", err)
}
clusterConfig.Cluster.Spec.BundlesRef = clus.Spec.BundlesRef
}
spec, err := cluster.BuildSpecFromConfig(ctx, clientutil.NewKubeClient(client), clusterConfig)
if err != nil {
return nil, fmt.Errorf("failed to build cluster spec from config: %s", err)
}
return spec, nil
}
| 101 |
eks-anywhere | aws | Go | package framework
import (
"os"
"path/filepath"
"strings"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type CommandOpt func(*string, *[]string) (err error)
func appendOpt(new ...string) CommandOpt {
return func(binaryPath *string, args *[]string) (err error) {
*args = append(*args, new...)
return nil
}
}
func withKubeconfig(kubeconfigFile string) CommandOpt {
return appendOpt("--kubeconfig", kubeconfigFile)
}
func WithForce() CommandOpt {
return appendOpt("--force-cleanup")
}
func WithControlPlaneWaitTimeout(timeout string) CommandOpt {
return appendOpt("--control-plane-wait-timeout", timeout)
}
func WithExternalEtcdWaitTimeout(timeout string) CommandOpt {
return appendOpt("--external-etcd-wait-timeout", timeout)
}
func WithPerMachineWaitTimeout(timeout string) CommandOpt {
return appendOpt("--per-machine-wait-timeout", timeout)
}
func ExecuteWithEksaRelease(release *releasev1alpha1.EksARelease) CommandOpt {
return executeWithBinaryCommandOpt(func() (string, error) {
return getBinary(release)
})
}
// PackagedBinary represents a binary that can be extracted and
// executed from local disk.
type PackagedBinary interface {
// BinaryPath returns the local disk path to the binary.
BinaryPath() (string, error)
}
// ExecuteWithBinary executes the command with a binary from a specific path.
func ExecuteWithBinary(eksa PackagedBinary) CommandOpt {
return executeWithBinaryCommandOpt(func() (string, error) {
return eksa.BinaryPath()
})
}
// WithSudo prefixes the command with "sudo" and preserves PATH.
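// For example, WithSudo("airgap") turns `eksctl-anywhere create cluster` into:
//
//	sudo -E PATH=$PATH -u airgap eksctl-anywhere create cluster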
func WithSudo(user string) CommandOpt {
return func(binaryPath *string, args *[]string) (err error) {
*args = append([]string{*binaryPath}, *args...)
*binaryPath = "sudo"
if user != "" {
*args = append([]string{"-E", "PATH=$PATH", "-u", user}, *args...)
}
return nil
}
}
// WithBundlesOverride sets the --bundles-override flag.
func WithBundlesOverride(bundles string) CommandOpt {
return appendOpt("--bundles-override", bundles)
}
type binaryFetcher func() (binaryPath string, err error)
func executeWithBinaryCommandOpt(fetcher binaryFetcher) CommandOpt {
return func(binaryPath *string, args *[]string) (err error) {
b, err := fetcher()
if err != nil {
return err
}
*binaryPath = b
if err = setEksctlVersionEnvVar(); err != nil {
return err
}
// When bundles override is present, the manifest belongs to the current
// build of the CLI and it's intended to be used only with that version
removeFlag("--bundles-override", args)
return nil
}
}
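// removeFlag drops a flag, and its value when one follows it, from args in place.
// For example, given args ["create", "cluster", "--bundles-override", "b.yaml"],
// removeFlag("--bundles-override", &args) leaves ["create", "cluster"].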
func removeFlag(flag string, args *[]string) {
for i, a := range *args {
if a == flag {
elementsToDelete := 1
// If it's not the last arg and next arg is not a flag,
// that means it's the value for the current flag, remove it as well
if i < len(*args)-1 && !strings.HasPrefix((*args)[i+1], "-") {
elementsToDelete = 2
}
*args = append((*args)[:i], (*args)[i+elementsToDelete:]...)
break
}
}
}
// DefaultLocalEKSABinaryPath returns the full path for the local eks-a binary being tested.
func DefaultLocalEKSABinaryPath() (string, error) {
binDir, err := DefaultLocalEKSABinDir()
if err != nil {
return "", err
}
return filepath.Join(binDir, "eksctl-anywhere"), nil
}
// DefaultLocalEKSABinDir returns the full path for the local directory where
// the tested eks-a binary lives.
func DefaultLocalEKSABinDir() (string, error) {
workDir, err := os.Getwd()
if err != nil {
return "", err
}
return filepath.Join(workDir, "bin"), nil
}
| 134 |
eks-anywhere | aws | Go | package framework
import (
"context"
"fmt"
"strings"
"github.com/aws/eks-anywhere/internal/pkg/conformance"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/version"
)
const kubeConformanceImage = "registry.k8s.io/conformance"
func (e *ClusterE2ETest) RunConformanceTests() {
ctx := context.Background()
cluster := e.Cluster()
setKubeconfigEnvVar(e.T, e.ClusterName)
contextName, err := e.KubectlClient.GetCurrentClusterContext(ctx, cluster)
if err != nil {
e.T.Errorf("Error getting context name: %v", err)
return
}
kubeVersion, err := e.getEksdReleaseKubeVersion()
if err != nil {
e.T.Errorf("Error getting EKS-D release KubeVersion from bundle: %v", err)
return
}
e.T.Log("Downloading Sonobuoy binary for testing")
err = conformance.Download()
if err != nil {
e.T.Errorf("Error downloading Sonobuoy binary: %v", err)
return
}
kubeConformanceImageTagged := fmt.Sprintf("%s:%s", kubeConformanceImage, kubeVersion)
args := []string{"--kube-conformance-image", kubeConformanceImageTagged}
e.T.Logf("Running k8s conformance tests with Image: %s", kubeConformanceImageTagged)
output, err := conformance.RunTests(ctx, contextName, args...)
if err != nil {
e.T.Errorf("Error running k8s conformance tests: %v", err)
return
}
e.T.Logf("Conformance Test run:\n %v", output)
results, err := conformance.GetResults(ctx, contextName, args...)
if err != nil {
e.T.Errorf("Error running k8s conformance tests: %v", err)
return
}
e.T.Logf("Conformance Test results:\n %v", results)
if hasFailed(results) {
e.T.Errorf("Conformance run has failed tests")
return
}
}
func (e *ClusterE2ETest) getEksdReleaseKubeVersion() (string, error) {
c, err := v1alpha1.GetClusterConfig(e.ClusterConfigLocation)
if err != nil {
return "", fmt.Errorf("fetching cluster config from file: %v", err)
}
r := manifests.NewReader(newFileReader())
b, err := r.ReadBundlesForVersion(version.Get().GitVersion)
if err != nil {
return "", fmt.Errorf("getting EKS-D release spec from bundle: %v", err)
}
versionsBundle := bundles.VersionsBundleForKubernetesVersion(b, string(c.Spec.KubernetesVersion))
versionsBundleKubeVersion := versionsBundle.EksD.KubeVersion
if versionsBundleKubeVersion == "" {
return "", fmt.Errorf("getting KubeVersion from EKS-D release spec: value empty")
}
return versionsBundleKubeVersion, nil
}
// hasFailed parses the conformance test results and looks for any failed tests.
// By default we run 2 plugins, so we expect the "Failed: 0" marker to appear twice.
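// For example, a fully passing run produces "Failed: 0" once per plugin (twice in
// total), so hasFailed returns false; any other count reports a failure.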
func hasFailed(results string) bool {
failedLog := "Failed: 0"
count := strings.Count(results, failedLog)
return count != 2
}
| 85 |
eks-anywhere | aws | Go | package framework
import (
"context"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
// ControlPlaneNodeValidation should return an error if either an error is encountered during execution or the validation logically fails.
// This validation function will be executed by ValidateControlPlaneNodes with a Control Plane configuration and a corresponding node
// which was created as a part of that configuration.
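//
// An illustrative validation (a sketch, not part of the framework) that checks node readiness:
//
//	func validateNodeReady(_ v1alpha1.ControlPlaneConfiguration, node corev1.Node) error {
//		for _, c := range node.Status.Conditions {
//			if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
//				return nil
//			}
//		}
//		return fmt.Errorf("node %s is not ready", node.Name)
//	}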
type ControlPlaneNodeValidation func(configuration v1alpha1.ControlPlaneConfiguration, node corev1.Node) (err error)
// ValidateControlPlaneNodes deduces the control plane configuration to node mapping
// and for each configuration/node pair executes the provided validation functions.
func (e *ClusterE2ETest) ValidateControlPlaneNodes(validations ...ControlPlaneNodeValidation) {
ctx := context.Background()
c := e.ClusterConfig.Cluster
cpNodes, err := e.KubectlClient.GetControlPlaneNodes(ctx, e.Cluster().KubeconfigFile)
if err != nil {
e.T.Fatal(err)
}
for _, node := range cpNodes {
for _, validation := range validations {
err = validation(c.Spec.ControlPlaneConfiguration, node)
if err != nil {
e.T.Errorf("Control plane node %v is not valid: %v", node.Name, err)
}
}
}
e.StopIfFailed()
}
| 37 |
eks-anywhere | aws | Go | package framework
import (
"os"
"testing"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
type PackageConfig struct {
*HelmInstallConfig
bundleURI string
packageConfiguration *v1alpha1.PackageConfiguration
}
func WithPackageConfig(t *testing.T, bundleURI, chartName, chartURI,
chartVersion string, chartValues []string, packageConfiguration *v1alpha1.PackageConfiguration,
) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.PackageConfig = &PackageConfig{
HelmInstallConfig: &HelmInstallConfig{
chartName: chartName,
chartURI: chartURI,
chartVersion: chartVersion,
chartValues: chartValues,
HelmClient: buildHelm(t),
},
packageConfiguration: packageConfiguration,
bundleURI: bundleURI,
}
}
}
const (
eksaPackagesRegion = "EKSA_AWS_SECRET_ACCESS_KEY"
eksaPackagesAccessKey = "EKSA_AWS_ACCESS_KEY_ID"
eksaPackagesSecretKey = "EKSA_AWS_REGION"
route53AccessKey = "ROUTE53_ACCESS_KEY_ID"
route53SecretKey = "ROUTE53_SECRET_ACCESS_KEY"
route53Region = "ROUTE53_REGION"
route53ZoneID = "ROUTE53_ZONEID"
)
var requiredPackagesEnvVars = []string{
eksaPackagesRegion,
eksaPackagesAccessKey,
eksaPackagesSecretKey,
}
var requiredCertManagerEnvVars = []string{
route53Region,
route53AccessKey,
route53SecretKey,
route53ZoneID,
}
// RequiredPackagesEnvVars returns the list of packages env vars.
func RequiredPackagesEnvVars() []string {
return requiredPackagesEnvVars
}
// RequiredCertManagerEnvVars returns the list of cert manager env vars.
func RequiredCertManagerEnvVars() []string {
return requiredCertManagerEnvVars
}
// CheckCuratedPackagesCredentials will exit out if the Curated Packages environment variables are not set.
func CheckCuratedPackagesCredentials(t *testing.T) {
for _, env := range requiredPackagesEnvVars {
_, ok := os.LookupEnv(env)
if !ok {
t.Fatalf("Error Unset Packages environment variable: %v is required", env)
}
}
}
// CheckCertManagerCredentials will exit if route53 credentials are not set.
func CheckCertManagerCredentials(t *testing.T) {
for _, env := range requiredCertManagerEnvVars {
_, ok := os.LookupEnv(env)
if !ok {
t.Fatalf("Error Unset Cert Manager environment variable: %v is required", env)
}
}
}
// GetRoute53Configs returns route53 configurations for cert-manager.
func GetRoute53Configs() (string, string, string, string) {
return os.Getenv(route53AccessKey), os.Getenv(route53SecretKey),
os.Getenv(route53Region), os.Getenv(route53ZoneID)
}
| 92 |
eks-anywhere | aws | Go | package framework
import (
"os"
"testing"
"time"
"github.com/aws/eks-anywhere/internal/pkg/api"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/providers/docker"
clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
)
// Docker is a Provider for running end-to-end tests.
type Docker struct {
t *testing.T
executables.Docker
}
const dockerPodCidrVar = "T_DOCKER_POD_CIDR"
// NewDocker creates a new Docker object implementing the Provider interface
// for testing.
func NewDocker(t *testing.T) *Docker {
docker := executables.BuildDockerExecutable()
return &Docker{
t: t,
Docker: *docker,
}
}
// Name implements the Provider interface.
func (d *Docker) Name() string {
return "docker"
}
// Setup implements the Provider interface.
func (d *Docker) Setup() {}
// CleanupVMs implements the Provider interface.
func (d *Docker) CleanupVMs(_ string) error {
return nil
}
// UpdateKubeConfig customizes generated kubeconfig by replacing the server value with correct host
// and the docker LB port. This is required for the docker provider.
func (d *Docker) UpdateKubeConfig(content *[]byte, clusterName string) error {
dockerClient := executables.BuildDockerExecutable()
p := docker.NewProvider(
nil,
dockerClient,
nil,
time.Now,
)
return p.UpdateKubeConfig(content, clusterName)
}
func (d *Docker) WithProviderUpgradeGit() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
// There is no config for docker api objects, no-op
}
}
// ClusterConfigUpdates satisfies the test framework Provider.
func (d *Docker) ClusterConfigUpdates() []api.ClusterConfigFiller {
f := []api.ClusterFiller{}
podCidr := os.Getenv(dockerPodCidrVar)
if podCidr != "" {
f = append(f, api.WithPodCidr(podCidr))
}
return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...)}
}
// WithNewWorkerNodeGroup returns an api.ClusterConfigFiller that adds a new
// workerNodeGroupConfiguration to the cluster config.
func (d *Docker) WithNewWorkerNodeGroup(machineConfig string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller {
return api.ClusterToConfigFiller(workerNodeGroup.ClusterFiller())
}
// ClusterStateValidations returns a list of provider specific validations.
func (d *Docker) ClusterStateValidations() []clusterf.StateValidation {
return []clusterf.StateValidation{}
}
// WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version. The OS family is not used by the Docker provider.
func (d *Docker) WithKubeVersionAndOS(osFamily anywherev1.OSFamily, kubeVersion anywherev1.KubernetesVersion) api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)),
)
}
| 92 |
eks-anywhere | aws | Go | package framework
import (
"log"
"github.com/aws/eks-anywhere/pkg/semver"
)
func newVersion(version string) *semver.Version {
v, err := semver.New(version)
if err != nil {
log.Fatalf("error creating semver for EKS-A version %s: %v", version, err)
}
return v
}
| 16 |
eks-anywhere | aws | Go | package framework
import (
"os"
)
func checkRequiredEnvVars(t T, requiredEnvVars []string) {
for _, eVar := range requiredEnvVars {
if _, ok := os.LookupEnv(eVar); !ok {
t.Fatalf("Required env var [%s] not present", eVar)
}
}
}
func setKubeconfigEnvVar(t T, clusterName string) {
err := os.Setenv("KUBECONFIG", clusterName+"/"+clusterName+"-eks-a-cluster.kubeconfig")
if err != nil {
t.Fatalf("Error setting KUBECONFIG env var: %v", err)
}
}
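// getEnvWithDefault returns the value of the environment variable key when it is set,
// and defaultValue otherwise, e.g. getEnvWithDefault("T_DOCKER_POD_CIDR", "192.168.0.0/16").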
func getEnvWithDefault(key, defaultValue string) string {
if value, ok := os.LookupEnv(key); ok {
return value
}
return defaultValue
}
| 29 |
eks-anywhere | aws | Go | package framework
import (
"context"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
func buildKubectl(t T) *executables.Kubectl {
ctx := context.Background()
kubectl := executableBuilder(ctx, t).BuildKubectlExecutable()
return kubectl
}
func buildLocalKubectl() *executables.Kubectl {
return executables.NewLocalExecutablesBuilder().BuildKubectlExecutable()
}
func executableBuilder(ctx context.Context, t T) *executables.ExecutablesBuilder {
executableBuilder, close, err := executables.InitInDockerExecutablesBuilder(ctx, executables.DefaultEksaImage())
if err != nil {
t.Fatalf("Unable initialize executable builder: %v", err)
}
t.Cleanup(func() {
if err := close(ctx); err != nil {
t.Fatal(err)
}
})
return executableBuilder
}
func buildGovc(t T) *executables.Govc {
ctx := context.Background()
tmpWriter, err := filewriter.NewWriter("unique-ip")
if err != nil {
t.Fatalf("Error creating tmp writer")
}
govc := executableBuilder(ctx, t).BuildGovcExecutable(tmpWriter)
t.Cleanup(func() {
govc.Close(ctx)
})
return govc
}
func buildDocker(t T) *executables.Docker {
return executables.BuildDockerExecutable()
}
func buildHelm(t T) *executables.Helm {
ctx := context.Background()
helm := executableBuilder(ctx, t).BuildHelmExecutable(executables.WithInsecure())
return helm
}
func buildSSH(t T) *executables.SSH {
return executables.NewLocalExecutablesBuilder().BuildSSHExecutable()
}
func buildCmk(t T) *executables.Cmk {
ctx := context.Background()
tmpWriter, err := filewriter.NewWriter("cmk")
if err != nil {
t.Fatalf("Error creating tmp writer")
}
execConfig, err := decoder.ParseCloudStackCredsFromEnv()
if err != nil {
t.Fatalf("parsing cloudstack credentials from environment: %v", err)
}
cmk, err := executableBuilder(ctx, t).BuildCmkExecutable(tmpWriter, execConfig)
if err != nil {
t.Fatalf("Error creating cmk client: %v", err)
}
t.Cleanup(func() {
cmk.Close(ctx)
})
return cmk
}
| 87 |
eks-anywhere | aws | Go | package framework
import "os"
func fileExists(filePath string) bool {
_, err := os.Stat(filePath)
return err == nil
}
| 9 |
eks-anywhere | aws | Go | package framework
import (
"bytes"
"context"
"fmt"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"time"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermarshaller"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/git"
gitfactory "github.com/aws/eks-anywhere/pkg/git/factory"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/version"
)
const (
eksaConfigFileName = "eksa-cluster.yaml"
fluxSystemNamespace = "flux-system"
GitRepositoryVar = "T_GIT_REPOSITORY"
GitRepoSshUrl = "T_GIT_SSH_REPO_URL"
GithubUserVar = "T_GITHUB_USER"
GithubTokenVar = "EKSA_GITHUB_TOKEN"
GitKnownHosts = "EKSA_GIT_KNOWN_HOSTS"
GitPrivateKeyFile = "EKSA_GIT_PRIVATE_KEY"
DefaultFluxConfigName = "eksa-test"
)
var fluxGithubRequiredEnvVars = []string{
GitRepositoryVar,
GithubUserVar,
GithubTokenVar,
}
var fluxGitRequiredEnvVars = []string{
GitKnownHosts,
GitPrivateKeyFile,
GitRepoSshUrl,
}
var fluxGitCreateGenerateRepoEnvVars = []string{
GitKnownHosts,
GitPrivateKeyFile,
GithubUserVar,
GithubTokenVar,
}
func getJobIDFromEnv() string {
return os.Getenv(JobIdVar)
}
func WithFluxGit(opts ...api.FluxConfigOpt) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
checkRequiredEnvVars(e.T, fluxGitRequiredEnvVars)
jobID := strings.Replace(getJobIDFromEnv(), ":", "-", -1)
e.ClusterConfig.FluxConfig = api.NewFluxConfig(DefaultFluxConfigName,
api.WithGenericGitProvider(
api.WithStringFromEnvVarGenericGitProviderConfig(GitRepoSshUrl, api.WithGitRepositoryUrl),
),
api.WithSystemNamespace("default"),
api.WithClusterConfigPath(jobID),
api.WithBranch(jobID),
)
e.clusterFillers = append(e.clusterFillers,
api.WithGitOpsRef(DefaultFluxConfigName, v1alpha1.FluxConfigKind),
)
// apply the rest of the opts passed into the function
for _, opt := range opts {
opt(e.ClusterConfig.FluxConfig)
}
e.T.Cleanup(e.CleanUpGitRepo)
}
}
func WithFluxGithub(opts ...api.FluxConfigOpt) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
fluxConfigName := fluxConfigName()
checkRequiredEnvVars(e.T, fluxGithubRequiredEnvVars)
e.ClusterConfig.FluxConfig = api.NewFluxConfig(fluxConfigName,
api.WithGithubProvider(
api.WithPersonalGithubRepository(true),
api.WithStringFromEnvVarGithubProviderConfig(GitRepositoryVar, api.WithGithubRepository),
api.WithStringFromEnvVarGithubProviderConfig(GithubUserVar, api.WithGithubOwner),
),
api.WithSystemNamespace("default"),
api.WithClusterConfigPath("path2"),
api.WithBranch("main"),
)
e.clusterFillers = append(e.clusterFillers,
api.WithGitOpsRef(fluxConfigName, v1alpha1.FluxConfigKind),
)
// apply the rest of the opts passed into the function
for _, opt := range opts {
opt(e.ClusterConfig.FluxConfig)
}
// Adding Job ID suffix to repo name
// e2e test jobs have Job Id with a ":", replacing with "-"
jobID := strings.Replace(getJobIDFromEnv(), ":", "-", -1)
withFluxRepositorySuffix(jobID)(e.ClusterConfig.FluxConfig)
// Setting GitRepo cleanup since GitOps configured
e.T.Cleanup(e.CleanUpGithubRepo)
}
}
// WithFluxGithubConfig returns ClusterConfigFiller that adds FluxConfig using the Github provider to the cluster config.
func WithFluxGithubConfig(opts ...api.FluxConfigOpt) api.ClusterConfigFiller {
fluxConfigName := fluxConfigName()
return api.JoinClusterConfigFillers(func(config *cluster.Config) {
config.FluxConfig = api.NewFluxConfig(fluxConfigName,
api.WithGithubProvider(
api.WithPersonalGithubRepository(true),
api.WithStringFromEnvVarGithubProviderConfig(GitRepositoryVar, api.WithGithubRepository),
api.WithStringFromEnvVarGithubProviderConfig(GithubUserVar, api.WithGithubOwner),
),
api.WithSystemNamespace("default"),
api.WithBranch("main"),
)
// apply the rest of the opts passed into the function
for _, opt := range opts {
opt(config.FluxConfig)
}
// Adding Job ID suffix to repo name
// e2e test jobs have Job Id with a ":", replacing with "-"
jobID := strings.Replace(getJobIDFromEnv(), ":", "-", -1)
withFluxRepositorySuffix(jobID)(config.FluxConfig)
}, api.ClusterToConfigFiller(api.WithGitOpsRef(fluxConfigName, v1alpha1.FluxConfigKind)))
}
// WithFluxGithubEnvVarCheck returns a ClusterE2ETestOpt that checks for the required env vars.
func WithFluxGithubEnvVarCheck() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
checkRequiredEnvVars(e.T, fluxGithubRequiredEnvVars)
}
}
// WithFluxGithubCleanup returns a ClusterE2ETestOpt that registers the git repository cleanup operation.
func WithFluxGithubCleanup() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.T.Cleanup(e.CleanUpGithubRepo)
}
}
func WithClusterUpgradeGit(fillers ...api.ClusterFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(
api.ClusterToConfigFiller(fillers...),
func(c *cluster.Config) {
// TODO: e.ClusterConfig.GitOpsConfig is defined from api.NewGitOpsConfig in WithFluxLegacy()
// instead of marshalling from the actual file in git repo.
// By default it does not include the namespace field. But Flux requires namespace always
// exist for all the objects managed by its kustomization controller.
// Need to refactor this to read gitopsconfig directly from file in git repo
// which always has the namespace field.
if c.GitOpsConfig != nil {
if c.GitOpsConfig.GetNamespace() == "" {
c.GitOpsConfig.SetNamespace("default")
}
c.FluxConfig = c.GitOpsConfig.ConvertToFluxConfig()
}
if c.FluxConfig.GetNamespace() == "" {
c.FluxConfig.SetNamespace("default")
}
},
)
}
}
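// withFluxRepositorySuffix appends a suffix to the configured Github repository name,
// e.g. a repository "eksa-test-repo" with suffix "job-1234" becomes "eksa-test-repo-job-1234".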
func withFluxRepositorySuffix(suffix string) api.FluxConfigOpt {
return func(c *v1alpha1.FluxConfig) {
repository := c.Spec.Github.Repository
c.Spec.Github.Repository = fmt.Sprintf("%s-%s", repository, suffix)
}
}
func fluxConfigName() string {
return fmt.Sprintf("%s-%s", defaultClusterName, test.RandString(5))
}
func (e *ClusterE2ETest) UpgradeWithGitOps(clusterOpts ...ClusterE2ETestOpt) {
e.upgradeWithGitOps(clusterOpts)
}
func (e *ClusterE2ETest) upgradeWithGitOps(clusterOpts []ClusterE2ETestOpt) {
ctx := context.Background()
e.initGit(ctx)
if err := e.validateInitialFluxState(ctx); err != nil {
e.T.Errorf("Error validating initial state of cluster gitops system: %v", err)
}
err := e.pullRemoteConfig(ctx)
if err != nil {
e.T.Errorf("pulling remote configuration: %v", err)
}
e.T.Log("Parsing pulled config from repo into test ClusterConfig")
// Read the cluster config we just pulled into e.ClusterConfig
e.parseClusterConfigFromLocalGitRepo()
// Apply the options, these are most of the times fillers, so they will update the
// cluster config we just read from the repo. This has to happen after we parse the cluster
// config from the repo or we might be updating a different version of the config.
for _, opt := range clusterOpts {
opt(e)
}
e.T.Log("Updating local cluster config file in git repo for upgrade")
// Marshall e.ClusterConfig and write it to the repo path
e.buildClusterConfigFileForGit()
if err := e.pushConfigChanges(ctx); err != nil {
e.T.Errorf("Error pushing local changes to remote git repo: %v", err)
}
e.T.Logf("Successfully updated version controlled cluster configuration")
if err := e.validateWorkerNodeUpdates(ctx); err != nil {
e.T.Errorf("Error validating worker nodes after updating git repo: %v", err)
}
}
func (e *ClusterE2ETest) initGit(ctx context.Context) {
c := e.ClusterConfig.Cluster
writer, err := filewriter.NewWriter(e.Cluster().Name)
if err != nil {
e.T.Errorf("Error configuring filewriter for e2e test: %v", err)
}
if e.ClusterConfig.GitOpsConfig != nil {
e.ClusterConfig.FluxConfig = e.ClusterConfig.GitOpsConfig.ConvertToFluxConfig()
}
g, err := e.NewGitTools(ctx, c, e.ClusterConfig.FluxConfig, writer, "")
if err != nil {
e.T.Errorf("Error configuring git client for e2e test: %v", err)
}
e.GitProvider = g.Provider
e.GitWriter = g.Writer
e.GitClient = g.Client
}
func (e *ClusterE2ETest) workloadClusterConfigPath(w *WorkloadCluster) string {
return e.clusterConfigPathFromName(w.ClusterName)
}
func (e *ClusterE2ETest) workloadClusterConfigGitPath(w *WorkloadCluster) string {
return filepath.Join(e.GitWriter.Dir(), e.workloadClusterConfigPath(w))
}
func (e *ClusterE2ETest) buildWorkloadClusterConfigFileForGit(w *WorkloadCluster) {
b := w.generateClusterConfigYaml()
g := e.GitWriter
p := filepath.Dir(e.workloadClusterConfigGitPath(w))
if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) {
err := os.MkdirAll(p, os.ModePerm)
if err != nil {
w.T.Fatalf("Creating directory [%s]: %v", g.Dir(), err)
}
}
_, err := g.Write(e.workloadClusterConfigPath(w), b, filewriter.PersistentFile)
if err != nil {
w.T.Fatalf("Error writing cluster config file to local git folder: %v", err)
}
}
func (e *ClusterE2ETest) addWorkloadClusterConfigToGit(ctx context.Context, w *WorkloadCluster) error {
p := e.workloadClusterConfigPath(w)
g := e.GitClient
if err := g.Add(p); err != nil {
return fmt.Errorf("adding cluster path changes at %s: %v", p, err)
}
if err := e.pushStagedChanges(ctx, "EKS-A E2E Flux test workload configuration changes added"); err != nil {
return fmt.Errorf("failed to push workload configuration changes %v", err)
}
return nil
}
func (e *ClusterE2ETest) deleteWorkloadClusterConfigFromGit(ctx context.Context, w *WorkloadCluster) error {
p := filepath.Dir(e.workloadClusterConfigPath(w))
g := e.GitClient
if err := g.Remove(p); err != nil {
return fmt.Errorf("removing cluster config at path %s: %v", p, err)
}
if err := e.pushStagedChanges(ctx, "EKS-A E2E Flux test workload configuration deleted"); err != nil {
return err
}
return nil
}
func (e *ClusterE2ETest) pushStagedChanges(ctx context.Context, commitMessage string) error {
g := e.GitClient
if err := g.Commit(commitMessage); err != nil {
return fmt.Errorf("commiting staged changes: %v", err)
}
repoUpToDateErr := &git.RepositoryUpToDateError{}
if err := g.Push(ctx); err != nil {
if !errors.Is(err, repoUpToDateErr) {
return fmt.Errorf("pushing staged changes to remote: %v", err)
}
e.T.Log(err.Error())
}
return nil
}
func (e *ClusterE2ETest) pushWorkloadClusterToGit(w *WorkloadCluster, opts ...api.ClusterConfigFiller) error {
ctx := context.Background()
e.initGit(ctx)
// Pull remote config using the management cluster
err := e.pullRemoteConfig(ctx)
if err != nil {
e.T.Errorf("Pulling remote configuration: %v", err)
}
if _, err := os.Stat(e.workloadClusterConfigGitPath(w)); err == nil {
// Read the cluster config we just pulled into w.ClusterConfig
e.T.Log("Parsing pulled config from repo into test ClusterConfig")
w.parseClusterConfigFromDisk(e.workloadClusterConfigGitPath(w))
}
// Update the cluster config with the provided api.ClusterConfigFillers
w.UpdateClusterConfig(opts...)
e.T.Log("Updating local config file in git repo")
// Marshal w.ClusterConfig and write it to the repo path
e.buildWorkloadClusterConfigFileForGit(w)
if err := e.addWorkloadClusterConfigToGit(ctx, w); err != nil {
return fmt.Errorf("failed to push local changes to remote git repo: %v", err)
}
e.T.Logf("Successfully pushed version controlled cluster configuration")
return nil
}
func (e *ClusterE2ETest) deleteWorkloadClusterFromGit(w *WorkloadCluster) error {
ctx := context.Background()
e.initGit(ctx)
err := e.pullRemoteConfig(ctx)
if err != nil {
e.T.Errorf("Pulling remote configuration: %v", err)
}
if err := e.deleteWorkloadClusterConfigFromGit(ctx, w); err != nil {
return fmt.Errorf("failed to push local changes to remote git repo: %v", err)
}
w.T.Logf("Successfully deleted version controlled cluster")
return nil
}
func (e *ClusterE2ETest) parseClusterConfigFromLocalGitRepo() {
c, err := cluster.ParseConfigFromFile(e.clusterConfigGitPath())
if err != nil {
e.T.Fatalf("Failed parsing cluster config from git repo: %s", err)
}
e.ClusterConfig = c
}
func (e *ClusterE2ETest) buildClusterConfigFileForGit() {
b := e.generateClusterConfigYaml()
_, err := e.GitWriter.Write(e.clusterConfGitPath(), b, filewriter.PersistentFile)
if err != nil {
e.T.Errorf("Error writing cluster config file to local git folder: %v", err)
}
}
func (e *ClusterE2ETest) ValidateFlux() {
c := e.ClusterConfig.Cluster
writer, err := filewriter.NewWriter(e.Cluster().Name)
if err != nil {
e.T.Errorf("Error configuring filewriter for e2e test: %v", err)
}
ctx := context.Background()
if e.ClusterConfig.GitOpsConfig != nil {
e.ClusterConfig.FluxConfig = e.ClusterConfig.GitOpsConfig.ConvertToFluxConfig()
}
g, err := e.NewGitTools(ctx, c, e.ClusterConfig.FluxConfig, writer, "")
if err != nil {
e.T.Errorf("Error configuring git client for e2e test: %v", err)
}
e.GitClient = g.Client
e.GitProvider = g.Provider
e.GitWriter = g.Writer
if err = e.validateInitialFluxState(ctx); err != nil {
e.T.Errorf("Error validating initial state of cluster gitops system: %v", err)
}
if err = e.validateWorkerNodeReplicaUpdates(ctx); err != nil {
e.T.Errorf("Error validating scaling of Flux managed cluster: %v", err)
}
if err = e.validateWorkerNodeMultiConfigUpdates(ctx); err != nil {
e.T.Errorf("Error upgrading worker nodes: %v", err)
}
writer, err = filewriter.NewWriter("")
if err != nil {
e.T.Errorf("Error configuring filewriter for e2e test: %v", err)
}
repoName := e.gitRepoName()
gitTools, err := e.NewGitTools(ctx, c, e.ClusterConfig.FluxConfig, writer, e.validateGitopsRepoContentPath(repoName))
if err != nil {
e.T.Errorf("Error configuring git client for e2e test: %v", err)
}
e.validateGitopsRepoContent(gitTools)
}
func (e *ClusterE2ETest) CleanUpGitRepo() {
c := e.ClusterConfig.Cluster
writer, err := filewriter.NewWriter(e.Cluster().Name)
if err != nil {
e.T.Errorf("configuring filewriter for e2e test: %v", err)
}
ctx := context.Background()
repoName := e.gitRepoName()
gitTools, err := e.NewGitTools(ctx, c, e.ClusterConfig.FluxConfig, writer, fmt.Sprintf("%s/%s", e.ClusterName, repoName))
if err != nil {
e.T.Errorf("configuring git client for e2e test: %v", err)
}
dirEntries, err := os.ReadDir(gitTools.RepositoryDirectory)
if errors.Is(err, os.ErrNotExist) {
e.T.Logf("repository directory %s does not exist; skipping cleanup", gitTools.RepositoryDirectory)
return
}
if err != nil {
e.T.Errorf("reading repository directory %s: %v", gitTools.RepositoryDirectory, err)
return
}
for _, entry := range dirEntries {
if entry.Name() == ".git" {
continue
}
entryPath := filepath.Join(gitTools.RepositoryDirectory, entry.Name())
if entry.IsDir() {
e.T.Logf("cleaning up directory: %v", entry.Name())
// entry.Name() is relative to the repository directory, so join it before removing.
if err := os.RemoveAll(entryPath); err != nil {
e.T.Logf("did not remove directory %s: %v", entry.Name(), err)
}
continue
}
e.T.Logf("cleaning up file: %v", entry.Name())
if err := os.Remove(entryPath); err != nil {
e.T.Logf("did not remove file %s: %v", entry.Name(), err)
}
}
if err = gitTools.Client.Add("*"); err != nil {
e.T.Logf("did not add files while cleaning up git repo: %v", err)
}
if err = gitTools.Client.Push(context.Background()); err != nil {
e.T.Logf("did not push to repo after cleanup: %v", err)
}
}
func (e *ClusterE2ETest) CleanUpGithubRepo() {
c := e.ClusterConfig.Cluster
writer, err := filewriter.NewWriter(e.Cluster().Name)
if err != nil {
e.T.Errorf("Error configuring filewriter for e2e test: %v", err)
}
ctx := context.Background()
if e.ClusterConfig.GitOpsConfig != nil {
e.ClusterConfig.FluxConfig = e.ClusterConfig.GitOpsConfig.ConvertToFluxConfig()
}
owner := e.ClusterConfig.FluxConfig.Spec.Github.Owner
repoName := e.gitRepoName()
gitTools, err := e.NewGitTools(ctx, c, e.ClusterConfig.FluxConfig, writer, fmt.Sprintf("%s/%s", e.ClusterName, repoName))
if err != nil {
e.T.Errorf("Error configuring git client for e2e test: %v", err)
}
opts := git.DeleteRepoOpts{Owner: owner, Repository: repoName}
repo, err := gitTools.Provider.GetRepo(ctx)
if err != nil {
e.T.Errorf("error getting Github repo %s: %v", repoName, err)
}
if repo == nil {
e.T.Logf("Skipped repo deletion: remote repo %s does not exist", repoName)
return
}
err = gitTools.Provider.DeleteRepo(ctx, opts)
if err != nil {
e.T.Errorf("error while deleting Github repo %s: %v", repoName, err)
}
}
type providerConfig struct {
datacenterConfig providers.DatacenterConfig
machineConfigs []providers.MachineConfig
}
func (e *ClusterE2ETest) validateInitialFluxState(ctx context.Context) error {
if err := e.validateFluxDeployments(ctx); err != nil {
return err
}
if err := e.validateEksaSystemDeployments(ctx); err != nil {
return err
}
return nil
}
func (e *ClusterE2ETest) validateWorkerNodeMultiConfigUpdates(ctx context.Context) error {
switch e.ClusterConfig.Cluster.Spec.DatacenterRef.Kind {
case v1alpha1.VSphereDatacenterKind:
clusterConfGitPath := e.clusterConfigGitPath()
machineTemplateName, err := e.machineTemplateName(ctx)
if err != nil {
return err
}
vsphereClusterConfig, err := v1alpha1.GetVSphereDatacenterConfig(clusterConfGitPath)
if err != nil {
return err
}
// update worker node specs
vsphereMachineConfigs, err := v1alpha1.GetVSphereMachineConfigs(clusterConfGitPath)
if err != nil {
return err
}
cpName := e.ClusterConfig.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
workerName := e.ClusterConfig.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
etcdName := ""
if e.ClusterConfig.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdName = e.ClusterConfig.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
}
vsphereMachineConfigs[workerName].Spec.DiskGiB += 10
vsphereMachineConfigs[workerName].Spec.MemoryMiB = 10196
vsphereMachineConfigs[workerName].Spec.NumCPUs = 1
// update replica
clusterSpec, err := e.clusterSpecFromGit()
if err != nil {
return err
}
count := *clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count + 1
clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = &count
providerConfig := providerConfig{
datacenterConfig: vsphereClusterConfig,
machineConfigs: e.convertVSphereMachineConfigs(cpName, workerName, etcdName, vsphereMachineConfigs),
}
_, err = e.updateEKSASpecInGit(ctx, clusterSpec, providerConfig)
if err != nil {
return err
}
err = e.validateWorkerNodeUpdates(ctx)
if err != nil {
return err
}
newMachineTemplateName, err := e.machineTemplateName(ctx)
if err != nil {
return err
}
if machineTemplateName == newMachineTemplateName {
return fmt.Errorf("machine template name should change on machine resource updates, old %s and new %s", machineTemplateName, newMachineTemplateName)
}
return nil
default:
return nil
}
}
func (e *ClusterE2ETest) validateGitopsRepoContentPath(repoName string) string {
return filepath.Join(e.ClusterName, "e2e-validate", repoName)
}
func (e *ClusterE2ETest) validateGitopsRepoContent(gitTools *gitfactory.GitTools) {
repoName := e.gitRepoName()
gitFilePath := e.clusterConfigGitPath()
localFilePath := filepath.Join(e.validateGitopsRepoContentPath(repoName), e.clusterConfGitPath())
ctx := context.Background()
gc := gitTools.Client
err := gc.Clone(ctx)
if err != nil {
e.T.Errorf("Error cloning github repo: %v", err)
}
branch := e.gitBranch()
err = gc.Branch(branch)
if err != nil {
e.T.Errorf("Error checking out branch: %v", err)
}
gitFile, err := os.ReadFile(gitFilePath)
if err != nil {
e.T.Errorf("Error opening file from the original repo directory: %v", err)
}
localFile, err := os.ReadFile(localFilePath)
if err != nil {
e.T.Errorf("Error opening file from the newly created repo directory: %v", err)
}
if !bytes.Equal(gitFile, localFile) {
e.T.Errorf("Error validating the content of git repo: %s and %s do not match", gitFilePath, localFilePath)
}
}
func (e *ClusterE2ETest) convertVSphereMachineConfigs(cpName, workerName, etcdName string, vsphereMachineConfigs map[string]*v1alpha1.VSphereMachineConfig) []providers.MachineConfig {
var configs []providers.MachineConfig
if vsphereMachineConfigs[cpName] != nil {
configs = append(configs, vsphereMachineConfigs[cpName])
}
if workerName != cpName && vsphereMachineConfigs[workerName] != nil {
configs = append(configs, vsphereMachineConfigs[workerName])
}
if etcdName != "" && etcdName != cpName && etcdName != workerName && vsphereMachineConfigs[etcdName] != nil {
configs = append(configs, vsphereMachineConfigs[etcdName])
}
return configs
}
func (e *ClusterE2ETest) convertCloudstackMachineConfigs(cpName, workerName, etcdName string, cloudstackMachineConfigs map[string]*v1alpha1.CloudStackMachineConfig) []providers.MachineConfig {
var configs []providers.MachineConfig
if cloudstackMachineConfigs[cpName] != nil {
configs = append(configs, cloudstackMachineConfigs[cpName])
}
if workerName != cpName && cloudstackMachineConfigs[workerName] != nil {
configs = append(configs, cloudstackMachineConfigs[workerName])
}
if etcdName != "" && etcdName != cpName && etcdName != workerName && cloudstackMachineConfigs[etcdName] != nil {
configs = append(configs, cloudstackMachineConfigs[etcdName])
}
return configs
}
func (e *ClusterE2ETest) validateWorkerNodeReplicaUpdates(ctx context.Context) error {
machineTemplateName, err := e.machineTemplateName(ctx)
if err != nil {
return err
}
_, err = e.updateWorkerNodeCountValue(ctx, 3)
if err != nil {
return err
}
if err := e.validateWorkerNodeUpdates(ctx); err != nil {
return err
}
_, err = e.updateWorkerNodeCountValue(ctx, 1)
if err != nil {
return err
}
newMachineTemplateName, err := e.machineTemplateName(ctx)
if err != nil {
return err
}
if machineTemplateName != newMachineTemplateName {
return fmt.Errorf("machine template name shouldn't change on just replica updates, old %s and new %s", machineTemplateName, newMachineTemplateName)
}
return e.validateWorkerNodeUpdates(ctx)
}
func (e *ClusterE2ETest) validateWorkerNodeUpdates(ctx context.Context, opts ...CommandOpt) error {
clusterConfGitPath := e.clusterConfigGitPath()
clusterConfig, err := v1alpha1.GetClusterConfig(clusterConfGitPath)
if err != nil {
return err
}
if err := e.waitForWorkerScaling(clusterConfig.Spec.WorkerNodeGroupConfigurations[0].Name, *clusterConfig.Spec.WorkerNodeGroupConfigurations[0].Count); err != nil {
return err
}
e.T.Log("Validating Worker Nodes replicas")
if err := e.waitForWorkerNodeValidation(); err != nil {
return err
}
e.T.Log("Validating Worker Node Machine Template")
return e.validateWorkerNodeMachineSpec(ctx, clusterConfGitPath)
}
func (e *ClusterE2ETest) machineTemplateName(ctx context.Context) (string, error) {
machineTemplateName, err := e.KubectlClient.MachineTemplateName(ctx, e.ClusterConfig.Cluster.Name, e.Cluster().KubeconfigFile, executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return "", err
}
return machineTemplateName, nil
}
func (e *ClusterE2ETest) validateFluxDeployments(ctx context.Context) error {
deploymentReplicas := 1
expectedDeployments := map[string]int{
"helm-controller": deploymentReplicas,
"kustomize-controller": deploymentReplicas,
"notification-controller": deploymentReplicas,
"source-controller": deploymentReplicas,
}
return e.validateDeploymentsInManagementCluster(ctx, fluxSystemNamespace, expectedDeployments)
}
func (e *ClusterE2ETest) validateEksaSystemDeployments(ctx context.Context) error {
expectedDeployments := map[string]int{"eksa-controller-manager": 1}
return e.validateDeploymentsInManagementCluster(ctx, constants.EksaSystemNamespace, expectedDeployments)
}
func (e *ClusterE2ETest) validateDeploymentsInManagementCluster(ctx context.Context, namespace string, expectedDeployments map[string]int) error {
err := retrier.Retry(20, time.Second, func() error {
e.T.Logf("Getting deployments in %s namespace...", namespace)
deployments, err := e.KubectlClient.GetDeployments(
ctx,
executables.WithKubeconfig(e.managementKubeconfigFilePath()),
executables.WithNamespace(namespace),
)
if err != nil {
return fmt.Errorf("getting deployments: %v", err)
}
for _, deployment := range deployments {
_, ok := expectedDeployments[deployment.Name]
if !ok {
e.T.Errorf("Error validating %s deployments; unexpected deployment %s present in namespace", namespace, deployment.Name)
}
if expectedDeployments[deployment.Name] != int(deployment.Status.ReadyReplicas) {
e.T.Log("Deployments have not scaled yet")
return fmt.Errorf("expected %d ready replicas of deployment %s; got %d ready replicas", expectedDeployments[deployment.Name], deployment.Name, deployment.Status.ReadyReplicas)
}
}
return nil
})
if err != nil {
e.T.Errorf("Error validating %s deployments: %v", namespace, err)
return err
}
e.T.Logf("Successfully validated %s deployments are present and ready", namespace)
return nil
}
func (e *ClusterE2ETest) updateWorkerNodeCountValue(ctx context.Context, newValue int) (string, error) {
clusterConfGitPath := e.clusterConfigGitPath()
providerConfig, err := e.providerConfig(clusterConfGitPath)
if err != nil {
return "", err
}
e.T.Logf("Updating workerNodeGroupConfiguration count to new value %d", newValue)
clusterSpec, err := e.clusterSpecFromGit()
if err != nil {
return "", err
}
clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = &newValue
p, err := e.updateEKSASpecInGit(ctx, clusterSpec, *providerConfig)
if err != nil {
return "", err
}
e.T.Logf("Successfully updated workerNodeGroupConfiguration count to new value %d", newValue)
return p, nil
}
func (e *ClusterE2ETest) providerConfig(clusterConfGitPath string) (*providerConfig, error) {
var providerConfig providerConfig
switch e.ClusterConfig.Cluster.Spec.DatacenterRef.Kind {
case v1alpha1.VSphereDatacenterKind:
datacenterConfig, err := v1alpha1.GetVSphereDatacenterConfig(clusterConfGitPath)
if err != nil {
return nil, err
}
machineConfigs, err := v1alpha1.GetVSphereMachineConfigs(clusterConfGitPath)
if err != nil {
return nil, err
}
providerConfig.datacenterConfig = datacenterConfig
etcdName := ""
if e.ClusterConfig.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdName = e.ClusterConfig.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
}
providerConfig.machineConfigs = e.convertVSphereMachineConfigs(
e.ClusterConfig.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name,
e.ClusterConfig.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name,
etcdName,
machineConfigs)
case v1alpha1.DockerDatacenterKind:
datacenterConfig, err := v1alpha1.GetDockerDatacenterConfig(clusterConfGitPath)
if err != nil {
return nil, err
}
providerConfig.datacenterConfig = datacenterConfig
case v1alpha1.CloudStackDatacenterKind:
datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(clusterConfGitPath)
if err != nil {
return nil, err
}
providerConfig.datacenterConfig = datacenterConfig
machineConfigs, err := v1alpha1.GetCloudStackMachineConfigs(clusterConfGitPath)
if err != nil {
return nil, err
}
etcdName := ""
if e.ClusterConfig.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdName = e.ClusterConfig.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
}
providerConfig.machineConfigs = e.convertCloudstackMachineConfigs(
e.ClusterConfig.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name,
e.ClusterConfig.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name,
etcdName,
machineConfigs)
default:
return nil, fmt.Errorf("unexpected DatacenterRef %s", e.ClusterConfig.Cluster.Spec.DatacenterRef.Kind)
}
return &providerConfig, nil
}
func (e *ClusterE2ETest) waitForWorkerNodeValidation() error {
ctx := context.Background()
return retrier.Retry(120, time.Second*10, func() error {
e.T.Log("Attempting to validate worker nodes...")
if err := e.KubectlClient.ValidateWorkerNodes(ctx, e.ClusterConfig.Cluster.Name, e.managementKubeconfigFilePath()); err != nil {
e.T.Logf("Worker node validation failed: %v", err)
return fmt.Errorf("validating worker nodes: %v", err)
}
return nil
})
}
func (e *ClusterE2ETest) validateWorkerNodeMachineSpec(ctx context.Context, clusterConfGitPath string) error {
switch e.ClusterConfig.Cluster.Spec.DatacenterRef.Kind {
case v1alpha1.VSphereDatacenterKind:
clusterConfig, err := v1alpha1.GetClusterConfig(clusterConfGitPath)
if err != nil {
return err
}
vsphereClusterConfig, err := v1alpha1.GetVSphereDatacenterConfig(clusterConfGitPath)
if err != nil {
return err
}
vsphereMachineConfigs, err := v1alpha1.GetVSphereMachineConfigs(clusterConfGitPath)
if err != nil {
return err
}
vsphereWorkerConfig := vsphereMachineConfigs[clusterConfig.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name]
return retrier.Retry(120, time.Second*10, func() error {
vsMachineTemplate, err := e.KubectlClient.VsphereWorkerNodesMachineTemplate(ctx, clusterConfig.Name, e.managementKubeconfigFilePath(), constants.EksaSystemNamespace)
if err != nil {
return err
}
if vsphereWorkerConfig.Spec.NumCPUs != int(vsMachineTemplate.Spec.Template.Spec.NumCPUs) {
err := fmt.Errorf("MachineSpec %s WorkloadVMsNumCPUs are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereWorkerConfig.Spec.NumCPUs, vsMachineTemplate.Spec.Template.Spec.NumCPUs)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereWorkerConfig.Spec.DiskGiB != int(vsMachineTemplate.Spec.Template.Spec.DiskGiB) {
err := fmt.Errorf("MachineSpec %s WorkloadDiskGiB are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereWorkerConfig.Spec.DiskGiB, vsMachineTemplate.Spec.Template.Spec.DiskGiB)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereWorkerConfig.Spec.Template != vsMachineTemplate.Spec.Template.Spec.Template {
err := fmt.Errorf("MachineSpec %s Template are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereWorkerConfig.Spec.Template, vsMachineTemplate.Spec.Template.Spec.Template)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereWorkerConfig.Spec.Folder != vsMachineTemplate.Spec.Template.Spec.Folder {
err := fmt.Errorf("MachineSpec %s Folder are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereWorkerConfig.Spec.Folder, vsMachineTemplate.Spec.Template.Spec.Folder)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if len(vsMachineTemplate.Spec.Template.Spec.Network.Devices) == 0 {
err := fmt.Errorf("MachineSpec %s Template has no devices", vsMachineTemplate.Name)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereClusterConfig.Spec.Network != vsMachineTemplate.Spec.Template.Spec.Network.Devices[0].NetworkName {
err := fmt.Errorf("MachineSpec %s Template are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereClusterConfig.Spec.Network, vsMachineTemplate.Spec.Template.Spec.Network.Devices[0].NetworkName)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereWorkerConfig.Spec.Datastore != vsMachineTemplate.Spec.Template.Spec.Datastore {
err := fmt.Errorf("MachineSpec %s Datastore are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereWorkerConfig.Spec.Datastore, vsMachineTemplate.Spec.Template.Spec.Datastore)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereClusterConfig.Spec.Datacenter != vsMachineTemplate.Spec.Template.Spec.Datacenter {
err := fmt.Errorf("MachineSpec %s Datacenter are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereClusterConfig.Spec.Datacenter, vsMachineTemplate.Spec.Template.Spec.Datacenter)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereWorkerConfig.Spec.ResourcePool != vsMachineTemplate.Spec.Template.Spec.ResourcePool {
err := fmt.Errorf("MachineSpec %s ResourcePool are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereWorkerConfig.Spec.ResourcePool, vsMachineTemplate.Spec.Template.Spec.ResourcePool)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereClusterConfig.Spec.Server != vsMachineTemplate.Spec.Template.Spec.Server {
err := fmt.Errorf("MachineSpec %s Server are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereClusterConfig.Spec.Server, vsMachineTemplate.Spec.Template.Spec.Server)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if vsphereClusterConfig.Spec.Thumbprint != vsMachineTemplate.Spec.Template.Spec.Thumbprint {
err := fmt.Errorf("MachineSpec %s Template are not at desired value; target: %v, actual: %v", vsMachineTemplate.Name, vsphereClusterConfig.Spec.Thumbprint, vsMachineTemplate.Spec.Template.Spec.Thumbprint)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
e.T.Logf("Worker MachineTemplate values have matched expected values")
return nil
})
case v1alpha1.CloudStackDatacenterKind:
clusterConfig, err := v1alpha1.GetClusterConfig(clusterConfGitPath)
if err != nil {
return err
}
cloudstackMachineConfigs, err := v1alpha1.GetCloudStackMachineConfigs(clusterConfGitPath)
if err != nil {
return err
}
cloudstackWorkerConfig := cloudstackMachineConfigs[clusterConfig.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name]
return retrier.Retry(120, time.Second*10, func() error {
csMachineTemplate, err := e.KubectlClient.CloudstackWorkerNodesMachineTemplate(ctx, clusterConfig.Name, e.managementKubeconfigFilePath(), constants.EksaSystemNamespace)
if err != nil {
return err
}
if cloudstackWorkerConfig.Spec.Template.Name != csMachineTemplate.Spec.Spec.Spec.Template.Name {
err := fmt.Errorf("MachineSpec %s Template are not at desired value; target: %v, actual: %v", csMachineTemplate.Name, cloudstackWorkerConfig.Spec.Template, csMachineTemplate.Spec.Spec.Spec.Template)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if cloudstackWorkerConfig.Spec.ComputeOffering.Name != csMachineTemplate.Spec.Spec.Spec.Offering.Name {
err := fmt.Errorf("MachineSpec %s Offering are not at desired value; target: %v, actual: %v", csMachineTemplate.Name, cloudstackWorkerConfig.Spec.ComputeOffering, csMachineTemplate.Spec.Spec.Spec.Offering)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if !reflect.DeepEqual(cloudstackWorkerConfig.Spec.UserCustomDetails, csMachineTemplate.Spec.Spec.Spec.Details) {
err := fmt.Errorf("MachineSpec %s Details are not at desired value; target: %v, actual: %v", csMachineTemplate.Name, cloudstackWorkerConfig.Spec.UserCustomDetails, csMachineTemplate.Spec.Spec.Spec.Details)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
var symlinks []string
for key, value := range cloudstackWorkerConfig.Spec.Symlinks {
symlinks = append(symlinks, key+":"+value)
}
if strings.Join(symlinks, ",") != csMachineTemplate.Annotations["symlinks."+constants.CloudstackAnnotationSuffix] {
err := fmt.Errorf("MachineSpec %s Symlinks are not at desired value; target: %v, actual: %v", csMachineTemplate.Name, cloudstackWorkerConfig.Spec.Symlinks, csMachineTemplate.Annotations["symlinks."+constants.CloudstackAnnotationSuffix])
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
if !reflect.DeepEqual(cloudstackWorkerConfig.Spec.AffinityGroupIds, csMachineTemplate.Spec.Spec.Spec.AffinityGroupIDs) {
err := fmt.Errorf("MachineSpec %s AffinityGroupIds are not at desired value; target: %v, actual: %v", csMachineTemplate.Name, cloudstackWorkerConfig.Spec.AffinityGroupIds, csMachineTemplate.Spec.Spec.Spec.AffinityGroupIDs)
e.T.Logf("Waiting for WorkerNode Specs to match - %s", err.Error())
return err
}
e.T.Logf("Worker MachineTemplate values have matched expected values")
return nil
})
default:
return nil
}
}
func (e *ClusterE2ETest) waitForWorkerScaling(name string, targetvalue int) error {
e.T.Logf("Waiting for worker node group %v MachineDeployment to scale to target value %d", name, targetvalue)
ctx := context.Background()
return retrier.Retry(120, time.Second*10, func() error {
md, err := e.KubectlClient.GetMachineDeployment(ctx, fmt.Sprintf("%v-%v", e.ClusterName, name),
executables.WithKubeconfig(e.managementKubeconfigFilePath()),
executables.WithNamespace(constants.EksaSystemNamespace),
)
if err != nil {
e.T.Logf("Unable to get machine deployment: %v", err)
return err
}
r := int(md.Status.Replicas)
if r != targetvalue {
e.T.Logf("Waiting for worker node MachineDeployment %s replicas to scale; target: %d, actual: %d", md.Name, targetvalue, r)
return fmt.Errorf("MachineDeployment %s replicas are not at desired scale; target: %d, actual: %d", md.Name, targetvalue, r)
}
e.T.Logf("Worker node MachineDeployment %s replicas have reached target scale %d", md.Name, r)
return nil
return nil
})
}
func (e *ClusterE2ETest) updateEKSASpecInGit(ctx context.Context, s *cluster.Spec, providersConfig providerConfig) (string, error) {
err := e.pullRemoteConfig(ctx)
if err != nil {
return "", err
}
p, err := e.writeEKSASpec(s, providersConfig.datacenterConfig, providersConfig.machineConfigs)
if err != nil {
return "", err
}
if err := e.pushConfigChanges(ctx); err != nil {
return "", err
}
e.T.Logf("Successfully updated version controlled cluster configuration")
return p, nil
}
func (e *ClusterE2ETest) pushConfigChanges(ctx context.Context) error {
p := e.clusterConfGitPath()
g := e.GitClient
if err := g.Add(p); err != nil {
return fmt.Errorf("adding cluster config changes at path %s: %v", p, err)
}
if err := e.pushStagedChanges(ctx, "EKS-A E2E Flux test configuration update"); err != nil {
return fmt.Errorf("failed to push config changes %v", err)
}
return nil
}
func (e *ClusterE2ETest) pullRemoteConfig(ctx context.Context) error {
g := e.GitClient
repoUpToDateErr := &git.RepositoryUpToDateError{}
if err := g.Pull(ctx, e.gitBranch()); err != nil {
if !errors.Is(err, repoUpToDateErr) {
return fmt.Errorf("pulling from remote before pushing config changes: %v", err)
}
e.T.Log(err.Error())
}
return nil
}
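// Both pullRemoteConfig and pushStagedChanges treat git.RepositoryUpToDateError as
// benign. For errors.Is to match a freshly constructed target like
// &git.RepositoryUpToDateError{}, the error type is assumed to implement the
// errors.Is contract, e.g. (a sketch, not the real implementation in pkg/git):
//
//	func (e *RepositoryUpToDateError) Is(target error) bool {
//		_, ok := target.(*RepositoryUpToDateError)
//		return ok
//	}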
// todo: reuse logic in clustermanager to template resources
func (e *ClusterE2ETest) writeEKSASpec(s *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) (path string, err error) {
resourcesSpec, err := clustermarshaller.MarshalClusterSpec(s, datacenterConfig, machineConfigs)
if err != nil {
return "", err
}
p := e.clusterConfGitPath()
e.T.Logf("writing cluster config to path %s", p)
clusterConfGitPath, err := e.GitWriter.Write(p, resourcesSpec, filewriter.PersistentFile)
if err != nil {
return "", err
}
return clusterConfGitPath, nil
}
func (e *ClusterE2ETest) gitRepoName() string {
if e.ClusterConfig.FluxConfig.Spec.Github != nil {
return e.ClusterConfig.FluxConfig.Spec.Github.Repository
}
if e.ClusterConfig.FluxConfig.Spec.Git != nil {
r := e.ClusterConfig.FluxConfig.Spec.Git.RepositoryUrl
return strings.TrimSuffix(path.Base(r), filepath.Ext(r))
}
return ""
}
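// For example, a Git RepositoryUrl of "ssh://git@host.example/owner/fluxrepo.git"
// yields "fluxrepo": path.Base returns "fluxrepo.git" and strings.TrimSuffix strips
// the ".git" extension reported by filepath.Ext.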
func (e *ClusterE2ETest) gitBranch() string {
return e.ClusterConfig.FluxConfig.Spec.Branch
}
func (e *ClusterE2ETest) clusterConfigPathFromName(clusterName string) string {
p := e.ClusterConfig.FluxConfig.Spec.ClusterConfigPath
if len(p) == 0 {
p = path.Join("clusters", e.ClusterName)
}
return path.Join(p, clusterName, constants.EksaSystemNamespace, eksaConfigFileName)
}
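// With an empty ClusterConfigPath and a management cluster named "mgmt" (assuming
// constants.EksaSystemNamespace is "eksa-system"), a cluster named "w01" resolves to
// "clusters/mgmt/w01/eksa-system/<eksaConfigFileName>", and the management cluster's
// own config lands at "clusters/mgmt/mgmt/eksa-system/<eksaConfigFileName>".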
func (e *ClusterE2ETest) clusterConfGitPath() string {
return e.clusterConfigPathFromName(e.ClusterName)
}
func (e *ClusterE2ETest) clusterConfigGitPath() string {
return filepath.Join(e.GitWriter.Dir(), e.clusterConfGitPath())
}
func (e *ClusterE2ETest) clusterSpecFromGit() (*cluster.Spec, error) {
var opts []cluster.FileSpecBuilderOpt
if getBundlesOverride() == "true" {
// This makes sure that the cluster.Spec uses the same Bundles we pass to the CLI.
// It avoids the bundlesRef getting overwritten with whatever default Bundles the
// e2e test build is configured to use.
opts = append(opts, cluster.WithOverrideBundlesManifest(defaultBundleReleaseManifestFile))
}
b := cluster.NewFileSpecBuilder(files.NewReader(), version.Get(), opts...)
s, err := b.Build(e.clusterConfigGitPath())
if err != nil {
return nil, fmt.Errorf("unable to build spec from git: %v", err)
}
return s, nil
}
func RequiredFluxGithubEnvVars() []string {
return fluxGithubRequiredEnvVars
}
func RequiredFluxGitCreateRepoEnvVars() []string {
return fluxGitCreateGenerateRepoEnvVars
}
| 1,103 |
eks-anywhere | aws | Go | package framework
import (
"context"
_ "embed"
"fmt"
"path/filepath"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/filewriter"
gitFactory "github.com/aws/eks-anywhere/pkg/git/factory"
)
func (e *ClusterE2ETest) NewGitTools(ctx context.Context, cluster *v1alpha1.Cluster, fluxConfig *v1alpha1.FluxConfig, writer filewriter.FileWriter, repoPath string) (*gitFactory.GitTools, error) {
if fluxConfig == nil {
return nil, nil
}
var localGitWriterPath string
var localGitRepoPath string
if repoPath == "" {
r := e.gitRepoName()
localGitWriterPath = filepath.Join("git", r)
localGitRepoPath = filepath.Join(cluster.Name, "git", r)
} else {
localGitWriterPath = repoPath
localGitRepoPath = repoPath
}
tools, err := gitFactory.Build(ctx, cluster, fluxConfig, writer, gitFactory.WithRepositoryDirectory(localGitRepoPath))
if err != nil {
return nil, fmt.Errorf("creating Git provider: %v", err)
}
if tools.Provider != nil {
err = tools.Provider.Validate(ctx)
if err != nil {
return nil, err
}
}
gitwriter, err := writer.WithDir(localGitWriterPath)
if err != nil {
return nil, fmt.Errorf("creating file writer: %v", err)
}
gitwriter.CleanUpTemp()
tools.Writer = gitwriter
return tools, nil
}
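// A minimal call sketch (assuming a test `e` with a populated FluxConfig):
//
//	tools, err := e.NewGitTools(ctx, e.ClusterConfig.Cluster, e.ClusterConfig.FluxConfig, writer, "")
//	if err != nil {
//		t.Fatal(err)
//	}
//	e.GitClient, e.GitProvider, e.GitWriter = tools.Client, tools.Provider, tools.Writer
//
// Passing an empty repoPath derives "git/<repo>" for the writer and
// "<cluster>/git/<repo>" for the local clone; a non-empty repoPath is used verbatim
// for both.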
| 48 |
eks-anywhere | aws | Go | package framework
import (
"testing"
"github.com/aws/eks-anywhere/pkg/executables"
)
type HelmInstallConfig struct {
chartName string
chartURI string
chartVersion string
chartValues []string
HelmClient *executables.Helm
}
func WithHelmInstallConfig(t *testing.T, chartName, chartURI, chartVersion string, chartValues []string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.HelmInstallConfig = &HelmInstallConfig{
chartName: chartName,
chartURI: chartURI,
chartVersion: chartVersion,
chartValues: chartValues,
HelmClient: buildHelm(t),
}
}
}
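// A usage sketch (the chart coordinates are illustrative, not a real chart):
//
//	test := NewClusterE2ETest(t, provider,
//		WithHelmInstallConfig(t, "hello", "oci://registry.example.com/charts/hello", "0.1.0",
//			[]string{"replicaCount=2"}),
//	)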
| 28 |
eks-anywhere | aws | Go | package framework
import (
"context"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
const (
// NTP configuration environment variables.
ntpServersVar = "T_NTP_SERVERS"
// Bottlerocket configuration environment variables.
maxPodsVar = "T_BR_K8S_SETTINGS_MAX_PODS"
clusterDNSIPSVar = "T_BR_K8S_SETTINGS_CLUSTER_DNS_IPS"
allowedUnsafeSysctlsVar = "T_BR_K8S_SETTINGS_ALLOWED_UNSAFE_SYSCTLS"
// other constants.
defaultSSHUsername = "ec2-user"
privateKeyFileName = "eks-a-id_rsa"
)
var (
ntpServersRequiredVar = []string{ntpServersVar}
brKubernetesRequiredVar = []string{maxPodsVar, clusterDNSIPSVar, allowedUnsafeSysctlsVar}
)
// RequiredNTPServersEnvVars returns a slice of environment variables required for NTP tests.
func RequiredNTPServersEnvVars() []string {
return ntpServersRequiredVar
}
// RequiredBottlerocketKubernetesSettingsEnvVars returns a slice of environment variables required for Bottlerocket Kubernetes tests.
func RequiredBottlerocketKubernetesSettingsEnvVars() []string {
return brKubernetesRequiredVar
}
// GetNTPServersFromEnv returns a slice of NTP servers read from the NTP environment variables.
func GetNTPServersFromEnv() []string {
serverFromEnv := os.Getenv(ntpServersVar)
return strings.Split(serverFromEnv, ",")
}
// GetBottlerocketKubernetesSettingsFromEnv returns the Bottlerocket Kubernetes settings read from the environment variables.
func GetBottlerocketKubernetesSettingsFromEnv() (allowedUnsafeSysctls, clusterDNSIPs []string, maxPods int, err error) {
allowedUnsafeSysctls = strings.Split(os.Getenv(allowedUnsafeSysctlsVar), ",")
clusterDNSIPs = strings.Split(os.Getenv(clusterDNSIPSVar), ",")
maxPods, err = strconv.Atoi(os.Getenv(maxPodsVar))
return allowedUnsafeSysctls, clusterDNSIPs, maxPods, err
}
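// For example (values are illustrative), with
//
//	T_BR_K8S_SETTINGS_ALLOWED_UNSAFE_SYSCTLS="net.core.somaxconn,net.ipv4.ip_local_port_range"
//	T_BR_K8S_SETTINGS_CLUSTER_DNS_IPS="10.96.0.10"
//	T_BR_K8S_SETTINGS_MAX_PODS="120"
//
// the function returns ([net.core.somaxconn net.ipv4.ip_local_port_range],
// [10.96.0.10], 120, nil). Note that strings.Split on an unset variable returns
// [""], so callers should check RequiredBottlerocketKubernetesSettingsEnvVars first.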
// ValidateNTPConfig validates NTP servers are configured properly on all cluster nodes using SSH.
func (e *ClusterE2ETest) ValidateNTPConfig(osFamily v1alpha1.OSFamily) {
ctx := context.Background()
machines, err := e.KubectlClient.GetCAPIMachines(ctx, e.managementCluster(), e.ClusterName)
if err != nil {
e.T.Fatalf("Error getting machines: %v", err)
}
for _, machine := range machines {
if len(machine.Status.Addresses) > 0 {
e.T.Logf("Validating NTP servers for machine %s with IP %s", machine.Name, machine.Status.Addresses[0].Address)
e.validateNTP(ctx, osFamily, machine.Status.Addresses[0].Address)
}
}
}
func (e *ClusterE2ETest) validateNTP(ctx context.Context, osFamily v1alpha1.OSFamily, IP string) {
ssh := buildSSH(e.T)
var command []string
if osFamily == v1alpha1.Bottlerocket {
command = []string{"apiclient", "get", "settings.ntp"}
} else {
command = []string{"chronyc", "sourcestats"}
}
out, err := ssh.RunCommand(ctx, filepath.Join(e.ClusterName, privateKeyFileName), defaultSSHUsername, IP, command...)
if err != nil {
e.T.Fatalf("failed to validate NTP server: %v", err)
}
for _, server := range GetNTPServersFromEnv() {
if !strings.Contains(out, server) {
e.T.Fatalf("NTP Server [%s] not configured on machine", server)
}
e.T.Logf("NTP server [%s] is configured", server)
}
}
// ValidateBottlerocketKubernetesSettings validates Bottlerocket Kubernetes settings are configured properly on all cluster nodes using SSH.
func (e *ClusterE2ETest) ValidateBottlerocketKubernetesSettings() {
ctx := context.Background()
machines, err := e.KubectlClient.GetCAPIMachines(ctx, e.managementCluster(), e.ClusterName)
if err != nil {
e.T.Fatalf("Error getting machines: %v", err)
}
for _, machine := range machines {
if len(machine.Status.Addresses) > 0 {
e.T.Logf("Validating Bottlerocket Kubernetes settings for machine %s with IP %s", machine.Name, machine.Status.Addresses[0].Address)
e.validateBottlerocketKubernetesSettings(ctx, machine.Status.Addresses[0].Address)
}
}
}
// nolint:gocyclo
func (e *ClusterE2ETest) validateBottlerocketKubernetesSettings(ctx context.Context, IP string) {
ssh := buildSSH(e.T)
command := []string{"apiclient", "get", "settings.network.hostname"}
gotHostname, err := ssh.RunCommand(ctx, filepath.Join(e.ClusterName, privateKeyFileName), defaultSSHUsername, IP, command...)
if err != nil {
e.T.Errorf("failed to validate Bottlerocket Kubernetes settings: %v", err)
}
if strings.Contains(gotHostname, "etcd") {
e.T.Log("Skipping Bottlerocket Kubernetes settings validation for etcd node")
return
}
command = []string{"apiclient", "get", "settings.kubernetes.allowed-unsafe-sysctls"}
gotAllowedUnsafeSysctls, err := ssh.RunCommand(ctx, filepath.Join(e.ClusterName, privateKeyFileName), defaultSSHUsername, IP, command...)
if err != nil {
e.T.Errorf("failed to validate Bottlerocket Kubernetes settings: %v", err)
}
command = []string{"apiclient", "get", "settings.kubernetes.cluster-dns-ip"}
gotClusterDNSIPs, err := ssh.RunCommand(ctx, filepath.Join(e.ClusterName, privateKeyFileName), defaultSSHUsername, IP, command...)
if err != nil {
e.T.Errorf("failed to validate Bottlerocket Kubernetes settings: %v", err)
}
command = []string{"apiclient", "get", "settings.kubernetes.max-pods"}
gotMaxPods, err := ssh.RunCommand(ctx, filepath.Join(e.ClusterName, privateKeyFileName), defaultSSHUsername, IP, command...)
if err != nil {
e.T.Errorf("failed to validate Bottlerocket Kubernetes settings: %v", err)
}
expectedAllowedUnsafeSysctls, expectedClusterDNSIPs, expectedMaxPods, err := GetBottlerocketKubernetesSettingsFromEnv()
if err != nil {
e.T.Errorf("failed to get Bottlerocket Kubernetes settings from environment variables: %v", err)
}
for _, sysctl := range expectedAllowedUnsafeSysctls {
if !strings.Contains(gotAllowedUnsafeSysctls, sysctl) {
e.T.Errorf("Bottlerocket Kubernetes setting [allowed-unsafe-sysctls: %s] not configured on machine", sysctl)
}
e.T.Logf("Bottlerocket Kubernetes setting [allowed-unsafe-sysctls: %s] is configured", sysctl)
}
for _, ip := range expectedClusterDNSIPs {
if !strings.Contains(gotClusterDNSIPs, ip) {
e.T.Errorf("Bottlerocket Kubernetes setting [cluster-dns-ips: %s] not configured on machine", ip)
}
e.T.Logf("Bottlerocket Kubernetes setting [cluster-dns-ips: %s] is configured", ip)
}
if !strings.Contains(gotMaxPods, strconv.Itoa(expectedMaxPods)) {
e.T.Errorf("Bottlerocket Kubernetes setting [max-pods: %d] not configured on machine", expectedMaxPods)
}
e.T.Logf("Bottlerocket Kubernetes setting [max-pods: %d] is configured", expectedMaxPods)
}
| 168 |
eks-anywhere | aws | Go | package framework
import (
"encoding/json"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
)
const LabelPrefix = "eksa.e2e"
func ValidateControlPlaneLabels(controlPlane v1alpha1.ControlPlaneConfiguration, node corev1.Node) error {
logger.V(4).Info("Validating control plane labels")
return validateLabels(controlPlane.Labels, node)
}
// ValidateControlPlaneFailureDomainLabels validates that the CloudStack provider replaces ds.meta_data.failuredomain with a proper failure domain name
// in the control plane node label 'cluster.x-k8s.io/failure-domain'.
func ValidateControlPlaneFailureDomainLabels(controlPlane v1alpha1.ControlPlaneConfiguration, node corev1.Node) error {
if controlPlane.MachineGroupRef.Kind == "CloudStackMachineConfig" {
logger.V(4).Info("Validating control plane node failuredomain label")
return validateFailureDomainLabel(controlPlane.Labels, node)
}
return fmt.Errorf("ds.meta_data.failuredomain placeholder in node label is currently only supported in CloudStack provider")
}
func ValidateWorkerNodeLabels(w v1alpha1.WorkerNodeGroupConfiguration, node corev1.Node) error {
logger.V(4).Info("Validating worker node labels", "worker node group", w.Name)
return validateLabels(w.Labels, node)
}
// ValidateWorkerNodeFailureDomainLabels validates that the CloudStack provider replaces ds.meta_data.failuredomain with a proper failure domain name
// in the worker group node label 'cluster.x-k8s.io/failure-domain'.
func ValidateWorkerNodeFailureDomainLabels(w v1alpha1.WorkerNodeGroupConfiguration, node corev1.Node) error {
if w.MachineGroupRef.Kind == v1alpha1.CloudStackMachineConfigKind {
logger.V(4).Info("Validating worker node failuredomain label", "worker node group", w.Name)
return validateFailureDomainLabel(w.Labels, node)
}
return fmt.Errorf("ds.meta_data.failuredomain placeholder in node label is currently only supported in CloudStack provider")
}
func validateLabels(expectedLabels map[string]string, node corev1.Node) error {
actualLabels := retrieveTestNodeLabels(node.Labels)
expectedBytes, _ := json.Marshal(expectedLabels)
actualBytes, _ := json.Marshal(actualLabels)
if !v1alpha1.MapEqual(expectedLabels, actualLabels) {
return fmt.Errorf("labels on node %v and corresponding configuration do not match; configured labels: %v; node labels: %v",
node.Name, string(expectedBytes), string(actualBytes))
}
logger.V(4).Info("expected labels from cluster spec configuration are present on the corresponding node", "node", node.Name, "node labels", string(actualBytes), "configuration labels", string(expectedBytes))
return nil
}
func retrieveTestNodeLabels(nodeLabels map[string]string) map[string]string {
labels := map[string]string{}
for key, val := range nodeLabels {
if strings.HasPrefix(key, LabelPrefix) {
labels[key] = val
}
}
return labels
}
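// For example, a node carrying {"eksa.e2e/team": "alpha", "kubernetes.io/os": "linux"}
// reduces to {"eksa.e2e/team": "alpha"}: only labels under the eksa.e2e test prefix
// take part in the comparison, so provider- and kubelet-managed labels never cause
// false mismatches.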
func validateFailureDomainLabel(expectedLabels map[string]string, node corev1.Node) error {
if failuredomainSpecified, ok := expectedLabels[constants.FailureDomainLabelName]; ok {
if failuredomain, exist := node.Labels[constants.FailureDomainLabelName]; exist {
logger.V(4).Info("node label: ", constants.FailureDomainLabelName, failuredomain)
if failuredomainSpecified == constants.CloudstackFailureDomainPlaceholder && failuredomain == failuredomainSpecified {
return fmt.Errorf("value %s of label %s on node %s is not replaced with a failurdomain name by CloudStack provider",
constants.CloudstackFailureDomainPlaceholder,
constants.FailureDomainLabelName,
node.Name)
}
} else {
return fmt.Errorf("expected labels %s not found on node %s", constants.FailureDomainLabelName, node.Name)
}
}
return nil
}
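// For example, if the cluster spec sets the label
// cluster.x-k8s.io/failure-domain: ds.meta_data.failuredomain (the CloudStack
// placeholder), the provider is expected to substitute a concrete failure domain
// name on the node; finding the placeholder verbatim on the node is a failure.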
| 85 |
eks-anywhere | aws | Go | package framework
import (
"fmt"
"sync"
"testing"
"time"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type MulticlusterE2ETest struct {
T *testing.T
ManagementCluster *ClusterE2ETest
WorkloadClusters WorkloadClusters
// MaxConcurrentWorkers defines the max number of workers for concurrent operations.
// If it's -1, it will use one worker per job.
MaxConcurrentWorkers int
workloadClusterNameCount int
}
func NewMulticlusterE2ETest(t *testing.T, managementCluster *ClusterE2ETest, workloadClusters ...*ClusterE2ETest) *MulticlusterE2ETest {
m := &MulticlusterE2ETest{
T: t,
ManagementCluster: managementCluster,
MaxConcurrentWorkers: -1,
}
m.WorkloadClusters = make(WorkloadClusters, len(workloadClusters))
for _, c := range workloadClusters {
c.clusterFillers = append(c.clusterFillers, api.WithManagementCluster(managementCluster.ClusterName))
c.ClusterName = m.NewWorkloadClusterName()
m.WithWorkloadClusters(c)
}
return m
}
// WithWorkloadClusters adds ClusterE2ETest's as workload clusters to the test.
func (m *MulticlusterE2ETest) WithWorkloadClusters(workloadClusters ...*ClusterE2ETest) {
for _, c := range workloadClusters {
m.WorkloadClusters[c.ClusterName] = &WorkloadCluster{
ClusterE2ETest: c,
ManagementClusterKubeconfigFile: m.ManagementCluster.KubeconfigFilePath,
}
}
}
// NewWorkloadClusterName returns a new unique name for a workload cluster based on the management cluster name.
// This is not thread safe.
func (m *MulticlusterE2ETest) NewWorkloadClusterName() string {
n := fmt.Sprintf("%s-w-%d", m.ManagementCluster.ClusterName, m.workloadClusterNameCount)
m.workloadClusterNameCount++
return n
}
func (m *MulticlusterE2ETest) RunInWorkloadClusters(flow func(*WorkloadCluster)) {
for name, w := range m.WorkloadClusters {
m.T.Logf("Running test flow in workload cluster %s", name)
flow(w)
}
}
// RunConcurrentlyInWorkloadClusters executes the given flow concurrently for all workload
// clusters. It respects MaxConcurrentWorkers.
func (m *MulticlusterE2ETest) RunConcurrentlyInWorkloadClusters(flow func(*WorkloadCluster)) {
jobs := make([]func(), 0, len(m.WorkloadClusters))
for name, wc := range m.WorkloadClusters {
w := wc
jobs = append(jobs, func() {
m.T.Logf("Running test flow in workload cluster %s", name)
flow(w)
})
}
m.RunConcurrently(jobs...)
}
// RunConcurrently runs the given jobs concurrently using no more than MaxConcurrentWorkers workers.
// If MaxConcurrentWorkers is -1, it will use one worker per job.
func (m *MulticlusterE2ETest) RunConcurrently(flows ...func()) {
wg := &sync.WaitGroup{}
workerNum := m.MaxConcurrentWorkers
if workerNum < 0 {
workerNum = len(flows)
}
jobs := make(chan func())
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for job := range jobs {
job()
}
}()
}
for _, flow := range flows {
jobs <- flow
}
close(jobs)
wg.Wait()
}
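// A usage sketch (the flows are illustrative):
//
//	m.MaxConcurrentWorkers = 2
//	m.RunConcurrently(
//		func() { m.T.Log("job 1") },
//		func() { m.T.Log("job 2") },
//		func() { m.T.Log("job 3") },
//	)
//
// With two workers, the unbuffered jobs channel hands each closure to the next idle
// worker; close(jobs) ends the workers' range loops, and wg.Wait blocks until every
// job has returned.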
func (m *MulticlusterE2ETest) CreateManagementClusterForVersion(eksaVersion string, opts ...CommandOpt) {
m.ManagementCluster.GenerateClusterConfigForVersion(eksaVersion)
m.CreateManagementCluster(opts...)
}
// CreateManagementClusterWithConfig first generates a cluster config based on the management cluster test's
// previous configuration and proceeds to create a management cluster with the CLI.
func (m *MulticlusterE2ETest) CreateManagementClusterWithConfig(opts ...CommandOpt) {
m.ManagementCluster.GenerateClusterConfig()
m.ManagementCluster.CreateCluster(opts...)
}
func (m *MulticlusterE2ETest) CreateManagementCluster(opts ...CommandOpt) {
m.ManagementCluster.CreateCluster(opts...)
}
// CreateTinkerbellManagementCluster runs tinkerbell related steps for cluster creation.
func (m *MulticlusterE2ETest) CreateTinkerbellManagementCluster(opts ...CommandOpt) {
m.ManagementCluster.GenerateHardwareConfig()
m.ManagementCluster.PowerOffHardware()
m.ManagementCluster.CreateCluster(opts...)
}
func (m *MulticlusterE2ETest) DeleteManagementCluster() {
m.ManagementCluster.DeleteCluster()
}
// DeleteTinkerbellManagementCluster runs tinkerbell related steps for cluster deletion.
func (m *MulticlusterE2ETest) DeleteTinkerbellManagementCluster() {
m.ManagementCluster.StopIfFailed()
m.ManagementCluster.DeleteCluster()
m.ManagementCluster.ValidateHardwareDecommissioned()
}
// PushWorkloadClusterToGit builds the workload cluster config file for git and pushing changes to git.
func (m *MulticlusterE2ETest) PushWorkloadClusterToGit(w *WorkloadCluster, opts ...api.ClusterConfigFiller) {
err := retrier.Retry(10, 5*time.Second, func() error {
return m.ManagementCluster.pushWorkloadClusterToGit(w, opts...)
})
if err != nil {
w.T.Fatalf("Error pushing workload cluster changes to git: %v", err)
}
}
// DeleteWorkloadClusterFromGit deletes a workload cluster config file and pushes the changes to git.
func (m *MulticlusterE2ETest) DeleteWorkloadClusterFromGit(w *WorkloadCluster) {
err := retrier.Retry(10, 5*time.Second, func() error {
return m.ManagementCluster.deleteWorkloadClusterFromGit(w)
})
if err != nil {
w.T.Fatalf("Error deleting workload cluster changes from git: %v", err)
}
}
| 161 |
eks-anywhere | aws | Go | package framework
import (
"fmt"
"os"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
func PopIPFromEnv(ipPoolEnvVar string) (string, error) {
ipPool, err := networkutils.NewIPPoolFromEnv(ipPoolEnvVar)
if err != nil {
return "", fmt.Errorf("popping IP from environment: %v", err)
}
ip, popErr := ipPool.PopIP()
if popErr != nil {
return "", fmt.Errorf("failed to get an ip address from the cluster ip pool env var %s: %v", ipPoolEnvVar, popErr)
}
// PopIP removes the ip from the pool.
// Therefore, we rewrite the env var so the next caller can pick from the remaining ips in the pool.
err = ipPool.ToEnvVar(ipPoolEnvVar)
if err != nil {
return "", fmt.Errorf("popping IP from environment: %v", err)
}
return ip, nil
}
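// A usage sketch (the variable name and pool contents are illustrative): with
// T_MY_IP_POOL="198.18.0.10,198.18.0.11", the first PopIPFromEnv("T_MY_IP_POOL")
// returns "198.18.0.10" and rewrites the variable to the remaining pool, so the
// next call returns "198.18.0.11".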
func GenerateUniqueIp(cidr string) (string, error) {
ipgen := networkutils.NewIPGenerator(&networkutils.DefaultNetClient{})
ip, err := ipgen.GenerateUniqueIP(cidr)
if err != nil {
return "", fmt.Errorf("getting unique IP for cidr %s: %v", cidr, err)
}
return ip, nil
}
func GetIP(cidr, ipEnvVar string) (string, error) {
value, ok := os.LookupEnv(ipEnvVar)
var ip string
var err error
if ok && value != "" {
ip, err = PopIPFromEnv(ipEnvVar)
if err != nil {
logger.V(2).Info("WARN: failed to pop ip from environment, attempting to generate unique ip")
ip, err = GenerateUniqueIp(cidr)
if err != nil {
return "", fmt.Errorf("failed to generate ip for cidr %s: %v", cidr, err)
}
}
} else {
ip, err = GenerateUniqueIp(cidr)
if err != nil {
return "", fmt.Errorf("failed to generate ip for cidr %s: %v", cidr, err)
}
}
return ip, nil
}
| 62 |
eks-anywhere | aws | Go | package framework
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/logger"
)
// ValidateControlPlaneNodeNameMatchCAPIMachineName validates that the node name is the same as the CAPI machine name.
func ValidateControlPlaneNodeNameMatchCAPIMachineName(controlPlane v1alpha1.ControlPlaneConfiguration, node corev1.Node) error {
if controlPlane.MachineGroupRef.Kind == "CloudStackMachineConfig" {
logger.V(4).Info("Validating control plane node matches CAPI machine name")
return validateNodeNameMatchCAPIMachineName(node)
}
return nil
}
// ValidateWorkerNodeNameMatchCAPIMachineName validates that the node name is the same as the CAPI machine name.
func ValidateWorkerNodeNameMatchCAPIMachineName(w v1alpha1.WorkerNodeGroupConfiguration, node corev1.Node) error {
if w.MachineGroupRef.Kind == "CloudStackMachineConfig" {
logger.V(4).Info("Validating worker node matches CAPI machine name")
return validateNodeNameMatchCAPIMachineName(node)
}
return nil
}
func validateNodeNameMatchCAPIMachineName(node corev1.Node) error {
capiMachineName, ok := node.Annotations["cluster.x-k8s.io/machine"]
if !ok {
return fmt.Errorf("CAPI machine name not found for node %s", node.Name)
}
if node.Name != capiMachineName {
return fmt.Errorf("node name %s not match CAPI machine name %s", node.Name, capiMachineName)
}
return nil
}
| 41 |
eks-anywhere | aws | Go | package framework
import (
"context"
"os"
"testing"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/pkg/nutanix"
"github.com/aws/eks-anywhere/internal/test/cleanup"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
)
const (
nutanixEndpoint = "T_NUTANIX_ENDPOINT"
nutanixPort = "T_NUTANIX_PORT"
nutanixAdditionalTrustBundle = "T_NUTANIX_ADDITIONAL_TRUST_BUNDLE"
nutanixInsecure = "T_NUTANIX_INSECURE"
nutanixMachineBootType = "T_NUTANIX_MACHINE_BOOT_TYPE"
nutanixMachineMemorySize = "T_NUTANIX_MACHINE_MEMORY_SIZE"
nutanixSystemDiskSize = "T_NUTANIX_SYSTEMDISK_SIZE"
nutanixMachineVCPUsPerSocket = "T_NUTANIX_MACHINE_VCPU_PER_SOCKET"
nutanixMachineVCPUSocket = "T_NUTANIX_MACHINE_VCPU_SOCKET"
nutanixPrismElementClusterName = "T_NUTANIX_PRISM_ELEMENT_CLUSTER_NAME"
nutanixSSHAuthorizedKey = "T_NUTANIX_SSH_AUTHORIZED_KEY"
nutanixSubnetName = "T_NUTANIX_SUBNET_NAME"
nutanixControlPlaneEndpointIP = "T_NUTANIX_CONTROL_PLANE_ENDPOINT_IP"
nutanixControlPlaneCidrVar = "T_NUTANIX_CONTROL_PLANE_CIDR"
nutanixPodCidrVar = "T_NUTANIX_POD_CIDR"
nutanixServiceCidrVar = "T_NUTANIX_SERVICE_CIDR"
nutanixTemplateNameUbuntu123Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_23"
nutanixTemplateNameUbuntu124Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_24"
nutanixTemplateNameUbuntu125Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_25"
nutanixTemplateNameUbuntu126Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26"
nutanixTemplateNameUbuntu127Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27"
)
var requiredNutanixEnvVars = []string{
constants.EksaNutanixUsernameKey,
constants.EksaNutanixPasswordKey,
nutanixEndpoint,
nutanixPort,
nutanixAdditionalTrustBundle,
nutanixMachineBootType,
nutanixMachineMemorySize,
nutanixSystemDiskSize,
nutanixMachineVCPUsPerSocket,
nutanixMachineVCPUSocket,
nutanixPrismElementClusterName,
nutanixSSHAuthorizedKey,
nutanixSubnetName,
nutanixPodCidrVar,
nutanixServiceCidrVar,
nutanixTemplateNameUbuntu123Var,
nutanixTemplateNameUbuntu124Var,
nutanixTemplateNameUbuntu125Var,
nutanixTemplateNameUbuntu126Var,
nutanixTemplateNameUbuntu127Var,
nutanixInsecure,
}
type Nutanix struct {
t *testing.T
fillers []api.NutanixFiller
clusterFillers []api.ClusterFiller
client nutanix.PrismClient
controlPlaneEndpointIP string
cpCidr string
podCidr string
serviceCidr string
}
type NutanixOpt func(*Nutanix)
func NewNutanix(t *testing.T, opts ...NutanixOpt) *Nutanix {
checkRequiredEnvVars(t, requiredNutanixEnvVars)
nutanixProvider := &Nutanix{
t: t,
fillers: []api.NutanixFiller{
api.WithNutanixStringFromEnvVar(nutanixEndpoint, api.WithNutanixEndpoint),
api.WithNutanixIntFromEnvVar(nutanixPort, api.WithNutanixPort),
api.WithNutanixStringFromEnvVar(nutanixAdditionalTrustBundle, api.WithNutanixAdditionalTrustBundle),
api.WithNutanixStringFromEnvVar(nutanixMachineMemorySize, api.WithNutanixMachineMemorySize),
api.WithNutanixStringFromEnvVar(nutanixSystemDiskSize, api.WithNutanixMachineSystemDiskSize),
api.WithNutanixInt32FromEnvVar(nutanixMachineVCPUsPerSocket, api.WithNutanixMachineVCPUsPerSocket),
api.WithNutanixInt32FromEnvVar(nutanixMachineVCPUSocket, api.WithNutanixMachineVCPUSocket),
api.WithNutanixStringFromEnvVar(nutanixSSHAuthorizedKey, api.WithNutanixSSHAuthorizedKey),
api.WithNutanixBoolFromEnvVar(nutanixInsecure, api.WithNutanixInsecure),
// Assumption: the cluster config generated by the Nutanix provider sets "name" as the
// identifier type by default. For the UUID-specific identifier type, we set it through
// each specific test so that the current CI works as-is with the name identifier type
// for the following resources.
api.WithNutanixStringFromEnvVar(nutanixPrismElementClusterName, api.WithNutanixPrismElementClusterName),
api.WithNutanixStringFromEnvVar(nutanixSubnetName, api.WithNutanixSubnetName),
},
}
nutanixProvider.controlPlaneEndpointIP = os.Getenv(nutanixControlPlaneEndpointIP)
nutanixProvider.cpCidr = os.Getenv(nutanixControlPlaneCidrVar)
nutanixProvider.podCidr = os.Getenv(nutanixPodCidrVar)
nutanixProvider.serviceCidr = os.Getenv(nutanixServiceCidrVar)
client, err := nutanix.NewPrismClient(os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true)
if err != nil {
t.Fatalf("Failed to initialize Nutanix Prism Client: %v", err)
}
nutanixProvider.client = client
for _, opt := range opts {
opt(nutanixProvider)
}
return nutanixProvider
}
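// Construction sketch: opts run last, so they can layer template fillers on top of
// the defaults above, e.g.
//
//	provider := NewNutanix(t, WithUbuntu127Nutanix())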
// RequiredNutanixEnvVars returns a list of environment variables needed for Nutanix tests.
func RequiredNutanixEnvVars() []string {
return requiredNutanixEnvVars
}
func (s *Nutanix) Name() string {
return "nutanix"
}
func (s *Nutanix) Setup() {}
// UpdateKubeConfig customizes generated kubeconfig for the provider.
func (s *Nutanix) UpdateKubeConfig(content *[]byte, clusterName string) error {
return nil
}
// CleanupVMs satisfies the test framework Provider.
func (s *Nutanix) CleanupVMs(clustername string) error {
return cleanup.NutanixTestResourcesCleanup(context.Background(), clustername, os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true, true)
}
// ClusterConfigUpdates satisfies the test framework Provider.
func (s *Nutanix) ClusterConfigUpdates() []api.ClusterConfigFiller {
f := make([]api.ClusterFiller, 0, len(s.clusterFillers)+3)
f = append(f, s.clusterFillers...)
if s.controlPlaneEndpointIP != "" {
f = append(f, api.WithControlPlaneEndpointIP(s.controlPlaneEndpointIP))
} else {
clusterIP, err := GetIP(s.cpCidr, ClusterIPPoolEnvVar)
if err != nil {
s.t.Fatalf("failed to get cluster ip for test environment: %v", err)
}
f = append(f, api.WithControlPlaneEndpointIP(clusterIP))
}
if s.podCidr != "" {
f = append(f, api.WithPodCidr(s.podCidr))
}
if s.serviceCidr != "" {
f = append(f, api.WithServiceCidr(s.serviceCidr))
}
return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.NutanixToConfigFiller(s.fillers...)}
}
func (s *Nutanix) WithProviderUpgrade(fillers ...api.NutanixFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(api.NutanixToConfigFiller(fillers...))
}
}
// WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all
// nutanix machine configs.
func (s *Nutanix) WithKubeVersionAndOS(osFamily anywherev1.OSFamily, kubeVersion anywherev1.KubernetesVersion) api.ClusterConfigFiller {
// TODO: Update tests to use this
panic("Not implemented for Nutanix yet")
}
// WithNewWorkerNodeGroup returns an api.ClusterFiller that adds a new workerNodeGroupConfiguration and
// a corresponding NutanixMachineConfig to the cluster config.
func (s *Nutanix) WithNewWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller {
// TODO: Implement for Nutanix provider
panic("Not implemented for Nutanix yet")
}
// WithUbuntu123Nutanix returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template for k8s 1.23
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu123Nutanix() NutanixOpt {
return func(v *Nutanix) {
v.fillers = append(v.fillers,
api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu123Var, api.WithNutanixMachineTemplateImageName),
api.WithOsFamilyForAllNutanixMachines(anywherev1.Ubuntu),
)
}
}
// WithUbuntu124Nutanix returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template for k8s 1.24
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu124Nutanix() NutanixOpt {
return func(v *Nutanix) {
v.fillers = append(v.fillers,
api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu124Var, api.WithNutanixMachineTemplateImageName),
api.WithOsFamilyForAllNutanixMachines(anywherev1.Ubuntu),
)
}
}
// WithUbuntu125Nutanix returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template for k8s 1.25
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu125Nutanix() NutanixOpt {
return func(v *Nutanix) {
v.fillers = append(v.fillers,
api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu125Var, api.WithNutanixMachineTemplateImageName),
api.WithOsFamilyForAllNutanixMachines(anywherev1.Ubuntu),
)
}
}
// WithUbuntu126Nutanix returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template for k8s 1.26
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu126Nutanix() NutanixOpt {
return func(v *Nutanix) {
v.fillers = append(v.fillers,
api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu126Var, api.WithNutanixMachineTemplateImageName),
api.WithOsFamilyForAllNutanixMachines(anywherev1.Ubuntu),
)
}
}
// WithUbuntu127Nutanix returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template for k8s 1.27
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu127Nutanix() NutanixOpt {
return func(v *Nutanix) {
v.fillers = append(v.fillers,
api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu127Var, api.WithNutanixMachineTemplateImageName),
api.WithOsFamilyForAllNutanixMachines(anywherev1.Ubuntu),
)
}
}
// WithUbuntu123NutanixUUID returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template UUID for k8s 1.23
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu123NutanixUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixTemplateNameUbuntu123Var)
v.fillers = append(v.fillers, v.withUbuntuNutanixUUID(name)...)
}
}
// WithUbuntu124NutanixUUID returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template UUID for k8s 1.24
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu124NutanixUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixTemplateNameUbuntu124Var)
v.fillers = append(v.fillers, v.withUbuntuNutanixUUID(name)...)
}
}
// WithUbuntu125NutanixUUID returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template UUID for k8s 1.25
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu125NutanixUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixTemplateNameUbuntu125Var)
v.fillers = append(v.fillers, v.withUbuntuNutanixUUID(name)...)
}
}
// WithUbuntu126NutanixUUID returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template UUID for k8s 1.26
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu126NutanixUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixTemplateNameUbuntu126Var)
v.fillers = append(v.fillers, v.withUbuntuNutanixUUID(name)...)
}
}
// WithUbuntu127NutanixUUID returns a NutanixOpt that adds API fillers to use an Ubuntu Nutanix template UUID for k8s 1.27
// and the "ubuntu" osFamily in all machine configs.
func WithUbuntu127NutanixUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixTemplateNameUbuntu127Var)
v.fillers = append(v.fillers, v.withUbuntuNutanixUUID(name)...)
}
}
func (s *Nutanix) withUbuntuNutanixUUID(name string) []api.NutanixFiller {
uuid, err := s.client.GetImageUUIDFromName(context.Background(), name)
if err != nil {
s.t.Fatalf("Failed to get UUID for image %s: %v", name, err)
}
return append([]api.NutanixFiller{},
api.WithNutanixMachineTemplateImageUUID(*uuid),
api.WithOsFamilyForAllNutanixMachines(anywherev1.Ubuntu),
)
}
// WithPrismElementClusterUUID returns a NutanixOpt that adds API fillers to use a PE Cluster UUID.
func WithPrismElementClusterUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixPrismElementClusterName)
uuid, err := v.client.GetClusterUUIDFromName(context.Background(), name)
if err != nil {
v.t.Fatalf("Failed to get UUID for image %s: %v", name, err)
}
v.fillers = append(v.fillers, api.WithNutanixPrismElementClusterUUID(*uuid))
}
}
// WithNutanixSubnetUUID returns a NutanixOpt that adds API fillers to use a Subnet UUID.
func WithNutanixSubnetUUID() NutanixOpt {
return func(v *Nutanix) {
name := os.Getenv(nutanixSubnetName)
uuid, err := v.client.GetSubnetUUIDFromName(context.Background(), name)
if err != nil {
v.t.Fatalf("Failed to get UUID for image %s: %v", name, err)
}
v.fillers = append(v.fillers, api.WithNutanixSubnetUUID(*uuid))
}
}
// UpdateNutanixUbuntuTemplate123Var returns a NutanixFiller that reads the env var and sets the machine config's
// image name in the spec.
func UpdateNutanixUbuntuTemplate123Var() api.NutanixFiller {
return api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu123Var, api.WithNutanixMachineTemplateImageName)
}
// UpdateNutanixUbuntuTemplate124Var returns a NutanixFiller that reads the env var and sets the machine config's
// image name in the spec.
func UpdateNutanixUbuntuTemplate124Var() api.NutanixFiller {
return api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu124Var, api.WithNutanixMachineTemplateImageName)
}
// UpdateNutanixUbuntuTemplate125Var returns a NutanixFiller that reads the env var and sets the machine config's
// image name in the spec.
func UpdateNutanixUbuntuTemplate125Var() api.NutanixFiller {
return api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu125Var, api.WithNutanixMachineTemplateImageName)
}
// UpdateNutanixUbuntuTemplate126Var returns a NutanixFiller that reads the env var and sets the machine config's
// image name in the spec.
func UpdateNutanixUbuntuTemplate126Var() api.NutanixFiller {
return api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu126Var, api.WithNutanixMachineTemplateImageName)
}
// UpdateNutanixUbuntuTemplate127Var returns a NutanixFiller that reads the env var and sets the machine config's
// image name in the spec.
func UpdateNutanixUbuntuTemplate127Var() api.NutanixFiller {
return api.WithNutanixStringFromEnvVar(nutanixTemplateNameUbuntu127Var, api.WithNutanixMachineTemplateImageName)
}
// ClusterStateValidations returns a list of provider specific ClusterStateValidations.
func (s *Nutanix) ClusterStateValidations() []clusterf.StateValidation {
return []clusterf.StateValidation{}
}
| 352 |
eks-anywhere | aws | Go | package framework
import (
"context"
"fmt"
"net/url"
"os"
"path"
"path/filepath"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/internal/pkg/oidc"
"github.com/aws/eks-anywhere/pkg/executables"
)
const (
OIDCIssuerUrlVar = "T_OIDC_ISSUER_URL"
OIDCClientIdVar = "T_OIDC_CLIENT_ID"
OIDCKidVar = "T_OIDC_KID"
OIDCKeyFileVar = "T_OIDC_KEY_FILE"
)
var oidcRequiredEnvVars = []string{
OIDCIssuerUrlVar,
OIDCClientIdVar,
OIDCKidVar,
OIDCKeyFileVar,
}
func WithOIDC() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.addClusterConfigFillers(WithOIDCClusterConfig(e.T))
}
}
// WithOIDCClusterConfig returns a ClusterConfigFiller that adds the default
// OIDCConfig for E2E tests to the cluster Config and links it by name in the
// Cluster resource.
func WithOIDCClusterConfig(t T) api.ClusterConfigFiller {
checkRequiredEnvVars(t, oidcRequiredEnvVars)
name := defaultClusterName
return api.JoinClusterConfigFillers(
api.WithOIDCConfig(name,
api.WithOIDCRequiredClaims("kubernetesAccess", "true"),
api.WithOIDCGroupsPrefix("s3-oidc:"),
api.WithOIDCGroupsClaim("groups"),
api.WithOIDCUsernamePrefix("s3-oidc:"),
api.WithOIDCUsernameClaim("email"),
api.WithStringFromEnvVarOIDCConfig(OIDCIssuerUrlVar, api.WithOIDCIssuerUrl),
api.WithStringFromEnvVarOIDCConfig(OIDCClientIdVar, api.WithOIDCClientId),
),
api.ClusterToConfigFiller(
api.WithOIDCIdentityProviderRef(name),
),
)
}
func (e *ClusterE2ETest) ValidateOIDC() {
ctx := context.Background()
cluster := e.Cluster()
e.T.Log("Creating roles for OIDC")
err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, cluster, oidcRoles)
if err != nil {
e.T.Errorf("Error applying roles for oids: %v", err)
return
}
issuerUrl, err := url.Parse(os.Getenv(OIDCIssuerUrlVar))
if err != nil {
e.T.Errorf("Error parsing oidc issuer url: %v", err)
return
}
kid := os.Getenv(OIDCKidVar)
keyFile := os.Getenv(OIDCKeyFileVar)
e.T.Log("Generating OIDC JWT token")
jwt, err := oidc.NewJWT(
path.Join(issuerUrl.Host, issuerUrl.Path),
kid,
keyFile,
oidc.WithEmail("[email protected]"),
oidc.WithGroup("developers"),
oidc.WithRole("dev"),
oidc.WithKubernetesAccess(true),
oidc.WithAudience(kid),
)
if err != nil {
e.T.Errorf("Error generating JWT token for oidc: %v", err)
return
}
apiServerUrl, err := e.KubectlClient.GetApiServerUrl(ctx, cluster)
if err != nil {
e.T.Errorf("Error getting api server url: %v", err)
return
}
e.T.Log("Getting pods with OIDC token")
_, err = e.KubectlClient.GetPods(
ctx,
executables.WithKubeconfig(filepath.Join(e.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", e.ClusterName))),
executables.WithServer(apiServerUrl),
executables.WithToken(jwt),
executables.WithSkipTLSVerify(),
executables.WithAllNamespaces(),
)
if err != nil {
e.T.Errorf("Error getting pods: %v", err)
}
e.T.Log("Getting deployments with OIDC token")
_, err = e.KubectlClient.GetDeployments(
ctx,
executables.WithKubeconfig(filepath.Join(e.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", e.ClusterName))),
executables.WithServer(apiServerUrl),
executables.WithToken(jwt),
executables.WithSkipTLSVerify(),
executables.WithAllNamespaces(),
)
if err != nil {
e.T.Errorf("Error getting deployments: %v", err)
}
}
// WithOIDCEnvVarCheck returns a ClusterE2ETestOpt that checks for the required env vars.
func WithOIDCEnvVarCheck() ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
checkRequiredEnvVars(e.T, oidcRequiredEnvVars)
}
}
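// A minimal sketch combining the option and the validation above (the
// cluster-creation step is elided; the call order is an illustrative
// assumption, not a prescribed flow):
func exampleOIDCTest(e *ClusterE2ETest) {
WithOIDC()(e)
// ... create the cluster with the e2e runner ...
e.ValidateOIDC()
}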
| 133 |
eks-anywhere | aws | Go | package framework
import (
"os"
"strings"
"github.com/aws/eks-anywhere/internal/pkg/api"
)
const (
vsphereHttpProxyVar = "T_HTTP_PROXY_VSPHERE"
vsphereHttpsProxyVar = "T_HTTPS_PROXY_VSPHERE"
vsphereNoProxyVar = "T_NO_PROXY_VSPHERE"
cloudstackHttpProxyVar = "T_HTTP_PROXY_CLOUDSTACK"
cloudstackHttpsProxyVar = "T_HTTPS_PROXY_CLOUDSTACK"
cloudstackNoProxyVar = "T_NO_PROXY_CLOUDSTACK"
tinkerbellHTTPProxyVar = "T_HTTP_PROXY_TINKERBELL"
tinkerbellHTTPSProxyVar = "T_HTTPS_PROXY_TINKERBELL"
tinkerbellNoProxyVar = "T_NO_PROXY_TINKERBELL"
)
var VsphereProxyRequiredEnvVars = ProxyRequiredEnvVars{
HttpProxy: vsphereHttpProxyVar,
HttpsProxy: vsphereHttpsProxyVar,
NoProxy: vsphereNoProxyVar,
}
var CloudstackProxyRequiredEnvVars = ProxyRequiredEnvVars{
HttpProxy: cloudstackHttpProxyVar,
HttpsProxy: cloudstackHttpsProxyVar,
NoProxy: cloudstackNoProxyVar,
}
// TinkerbellProxyRequiredEnvVars is for proxy related variables for tinkerbell.
var TinkerbellProxyRequiredEnvVars = ProxyRequiredEnvVars{
HttpProxy: tinkerbellHTTPProxyVar,
HttpsProxy: tinkerbellHTTPSProxyVar,
NoProxy: tinkerbellNoProxyVar,
}
type ProxyRequiredEnvVars struct {
HttpProxy string
HttpsProxy string
NoProxy string
}
func WithProxy(requiredEnvVars ProxyRequiredEnvVars) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
checkRequiredEnvVars(e.T, []string{requiredEnvVars.HttpProxy, requiredEnvVars.HttpsProxy, requiredEnvVars.NoProxy})
httpProxy := os.Getenv(requiredEnvVars.HttpProxy)
httpsProxy := os.Getenv(requiredEnvVars.HttpsProxy)
noProxies := os.Getenv(requiredEnvVars.NoProxy)
var noProxy []string
for _, data := range strings.Split(noProxies, ",") {
noProxy = append(noProxy, strings.TrimSpace(data))
}
e.clusterFillers = append(e.clusterFillers,
api.WithProxyConfig(httpProxy, httpsProxy, noProxy),
)
}
}
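// A minimal sketch (illustrative): enable the proxy fillers for a vSphere
// test. The HTTP(S) proxy vars hold values like "http://proxy.example.com:3128"
// and the no-proxy var a comma-separated list such as "10.0.0.0/8, .cluster.local",
// which WithProxy splits on commas and trims.
func exampleProxiedVSphereOpt() ClusterE2ETestOpt {
return WithProxy(VsphereProxyRequiredEnvVars)
}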
| 63 |
eks-anywhere | aws | Go | package framework
import "github.com/aws/eks-anywhere/pkg/files"
func newFileReader() *files.Reader {
return files.NewReader(files.WithEKSAUserAgent("e2e-test", testBranch()))
}
| 8 |
eks-anywhere | aws | Go | package framework
import (
"context"
"encoding/base64"
"net"
"os"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/constants"
)
const (
RegistryEndpointVar = "T_REGISTRY_MIRROR_ENDPOINT"
RegistryPortVar = "T_REGISTRY_MIRROR_PORT"
RegistryUsernameVar = "T_REGISTRY_MIRROR_USERNAME"
RegistryPasswordVar = "T_REGISTRY_MIRROR_PASSWORD"
RegistryCACertVar = "T_REGISTRY_MIRROR_CA_CERT"
RegistryEndpointTinkerbellVar = "T_REGISTRY_MIRROR_ENDPOINT_TINKERBELL"
RegistryPortTinkerbellVar = "T_REGISTRY_MIRROR_PORT_TINKERBELL"
RegistryUsernameTinkerbellVar = "T_REGISTRY_MIRROR_USERNAME_TINKERBELL"
RegistryPasswordTinkerbellVar = "T_REGISTRY_MIRROR_PASSWORD_TINKERBELL"
RegistryCACertTinkerbellVar = "T_REGISTRY_MIRROR_CA_CERT_TINKERBELL"
RegistryMirrorDefaultSecurityGroup = "T_REGISTRY_MIRROR_DEFAULT_SECURITY_GROUP"
RegistryMirrorAirgappedSecurityGroup = "T_REGISTRY_MIRROR_AIRGAPPED_SECURITY_GROUP"
PrivateRegistryEndpointVar = "T_PRIVATE_REGISTRY_MIRROR_ENDPOINT"
PrivateRegistryPortVar = "T_PRIVATE_REGISTRY_MIRROR_PORT"
PrivateRegistryUsernameVar = "T_PRIVATE_REGISTRY_MIRROR_USERNAME"
PrivateRegistryPasswordVar = "T_PRIVATE_REGISTRY_MIRROR_PASSWORD"
PrivateRegistryCACertVar = "T_PRIVATE_REGISTRY_MIRROR_CA_CERT"
PrivateRegistryEndpointTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_ENDPOINT_TINKERBELL"
PrivateRegistryPortTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_PORT_TINKERBELL"
PrivateRegistryUsernameTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_USERNAME_TINKERBELL"
PrivateRegistryPasswordTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_PASSWORD_TINKERBELL"
PrivateRegistryCACertTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_CA_CERT_TINKERBELL"
)
var (
registryMirrorRequiredEnvVars = []string{RegistryEndpointVar, RegistryPortVar, RegistryUsernameVar, RegistryPasswordVar, RegistryCACertVar}
registryMirrorTinkerbellRequiredEnvVars = []string{RegistryEndpointTinkerbellVar, RegistryPortTinkerbellVar, RegistryUsernameTinkerbellVar, RegistryPasswordTinkerbellVar, RegistryCACertTinkerbellVar}
registryMirrorDockerAirgappedRequiredEnvVars = []string{RegistryMirrorDefaultSecurityGroup, RegistryMirrorAirgappedSecurityGroup}
privateRegistryMirrorRequiredEnvVars = []string{PrivateRegistryEndpointVar, PrivateRegistryPortVar, PrivateRegistryUsernameVar, PrivateRegistryPasswordVar, PrivateRegistryCACertVar}
privateRegistryMirrorTinkerbellRequiredEnvVars = []string{PrivateRegistryEndpointTinkerbellVar, PrivateRegistryPortTinkerbellVar, PrivateRegistryUsernameTinkerbellVar, PrivateRegistryPasswordTinkerbellVar, PrivateRegistryCACertTinkerbellVar}
)
// WithRegistryMirrorInsecureSkipVerify sets up e2e for registry mirrors with InsecureSkipVerify option.
func WithRegistryMirrorInsecureSkipVerify(providerName string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
setupRegistryMirrorEndpointAndCert(e, providerName, true)
}
}
// WithRegistryMirrorEndpointAndCert sets up e2e for registry mirrors.
func WithRegistryMirrorEndpointAndCert(providerName string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
setupRegistryMirrorEndpointAndCert(e, providerName, false)
}
}
// WithAuthenticatedRegistryMirror sets up e2e for authenticated registry mirrors.
func WithAuthenticatedRegistryMirror(providerName string) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
var endpoint, hostPort, username, password, registryCert string
port := "443"
switch providerName {
case constants.TinkerbellProviderName:
checkRequiredEnvVars(e.T, privateRegistryMirrorTinkerbellRequiredEnvVars)
endpoint = os.Getenv(PrivateRegistryEndpointTinkerbellVar)
hostPort = net.JoinHostPort(endpoint, os.Getenv(PrivateRegistryPortTinkerbellVar))
username = os.Getenv(PrivateRegistryUsernameTinkerbellVar)
password = os.Getenv(PrivateRegistryPasswordTinkerbellVar)
registryCert = os.Getenv(PrivateRegistryCACertTinkerbellVar)
if os.Getenv(PrivateRegistryPortTinkerbellVar) != "" {
port = os.Getenv(PrivateRegistryPortTinkerbellVar)
}
default:
checkRequiredEnvVars(e.T, privateRegistryMirrorRequiredEnvVars)
endpoint = os.Getenv(PrivateRegistryEndpointVar)
hostPort = net.JoinHostPort(endpoint, os.Getenv(PrivateRegistryPortVar))
username = os.Getenv(PrivateRegistryUsernameVar)
password = os.Getenv(PrivateRegistryPasswordVar)
registryCert = os.Getenv(PrivateRegistryCACertVar)
if os.Getenv(PrivateRegistryPortVar) != "" {
port = os.Getenv(PrivateRegistryPortVar)
}
}
// Set env vars for helm login/push
err := os.Setenv("REGISTRY_USERNAME", username)
if err != nil {
e.T.Fatalf("unable to set REGISTRY_USERNAME: %v", err)
}
err = os.Setenv("REGISTRY_PASSWORD", password)
if err != nil {
e.T.Fatalf("unable to set REGISTRY_PASSWORD: %v", err)
}
err = buildDocker(e.T).Login(context.Background(), hostPort, username, password)
if err != nil {
e.T.Fatalf("error logging into docker registry %s: %v", hostPort, err)
}
certificate, err := base64.StdEncoding.DecodeString(registryCert)
if err == nil {
e.clusterFillers = append(e.clusterFillers,
api.WithRegistryMirror(endpoint, port, string(certificate), true, false),
)
}
}
}
// RequiredRegistryMirrorEnvVars returns the combined list of registry mirror env vars.
// It builds a new slice so the package-level lists are not mutated on repeated calls.
func RequiredRegistryMirrorEnvVars() []string {
var envVars []string
envVars = append(envVars, registryMirrorRequiredEnvVars...)
envVars = append(envVars, registryMirrorTinkerbellRequiredEnvVars...)
envVars = append(envVars, privateRegistryMirrorRequiredEnvVars...)
envVars = append(envVars, privateRegistryMirrorTinkerbellRequiredEnvVars...)
return append(envVars, registryMirrorDockerAirgappedRequiredEnvVars...)
}
func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, insecureSkipVerify bool) {
var endpoint, hostPort, username, password, registryCert string
port := "443"
switch providerName {
case constants.TinkerbellProviderName:
checkRequiredEnvVars(e.T, registryMirrorTinkerbellRequiredEnvVars)
endpoint = os.Getenv(RegistryEndpointTinkerbellVar)
hostPort = net.JoinHostPort(endpoint, os.Getenv(RegistryPortTinkerbellVar))
username = os.Getenv(RegistryUsernameTinkerbellVar)
password = os.Getenv(RegistryPasswordTinkerbellVar)
registryCert = os.Getenv(RegistryCACertTinkerbellVar)
if os.Getenv(RegistryPortTinkerbellVar) != "" {
port = os.Getenv(RegistryPortTinkerbellVar)
}
default:
checkRequiredEnvVars(e.T, registryMirrorRequiredEnvVars)
endpoint = os.Getenv(RegistryEndpointVar)
hostPort = net.JoinHostPort(endpoint, os.Getenv(RegistryPortVar))
username = os.Getenv(RegistryUsernameVar)
password = os.Getenv(RegistryPasswordVar)
registryCert = os.Getenv(RegistryCACertVar)
if os.Getenv(RegistryPortVar) != "" {
port = os.Getenv(RegistryPortVar)
}
}
err := buildDocker(e.T).Login(context.Background(), hostPort, username, password)
if err != nil {
e.T.Fatalf("error logging into docker registry %s: %v", hostPort, err)
}
certificate, err := base64.StdEncoding.DecodeString(registryCert)
if err == nil {
e.clusterFillers = append(e.clusterFillers,
api.WithRegistryMirror(endpoint, port, string(certificate), false, insecureSkipVerify),
)
}
// Set env vars for helm login/push
err = os.Setenv("REGISTRY_USERNAME", username)
if err != nil {
e.T.Fatalf("unable to set REGISTRY_USERNAME: %v", err)
}
err = os.Setenv("REGISTRY_PASSWORD", password)
if err != nil {
e.T.Fatalf("unable to set REGISTRY_PASSWORD: %v", err)
}
}
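// encodeRegistryCACert is a sketch of producing the base64-encoded PEM that
// the *_CA_CERT env vars above are decoded from (the PEM source is an
// assumption; any CA bundle bytes work).
func encodeRegistryCACert(pemBytes []byte) string {
return base64.StdEncoding.EncodeToString(pemBytes)
}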
| 167 |
eks-anywhere | aws | Go | package framework
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/internal/pkg/files"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests/releases"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/validations"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
prodReleasesManifest = "https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml"
releaseBinaryName = "eksctl-anywhere"
BranchNameEnvVar = "T_BRANCH_NAME"
defaultTestBranch = "main"
)
// GetLatestMinorReleaseFromTestBranch inspects the T_BRANCH_NAME environment variable for a
// branch to retrieve the latest released CLI version. If T_BRANCH_NAME is main, it returns
// the latest minor release.
//
// If T_BRANCH_NAME is not main, it expects it to be of the format release-<major>.<minor>
// and will use the <major>.<minor> to retrieve the previous minor release. For example, if the
// release branch is release-0.2 it will retrieve the latest 0.1 release.
func GetLatestMinorReleaseFromTestBranch() (*releasev1alpha1.EksARelease, error) {
testBranch := testBranch()
if testBranch == "main" {
return GetLatestMinorReleaseFromMain()
}
testBranchFirstSemver, err := semverForReleaseBranch(testBranch)
if err != nil {
return nil, err
}
return GetPreviousMinorReleaseFromVersion(testBranchFirstSemver)
}
// EKSAVersionForTestBinary returns the "future" EKS-A version for the tested binary based on the T_BRANCH_NAME env var.
// For main, it returns the next minor version.
// For a release branch, it returns the next patch version for that release's minor version.
func EKSAVersionForTestBinary() (string, error) {
if testBranch := testBranch(); testBranch != "main" {
return eksaVersionForReleaseBranch(testBranch)
}
return eksaVersionForMain()
}
func eksaVersionForMain() (string, error) {
latestRelease, err := GetLatestMinorReleaseFromMain()
if err != nil {
return "", err
}
latestReleaseSemVer, err := semver.New(latestRelease.Version)
if err != nil {
return "", errors.Wrapf(err, "parsing version for release %s", latestRelease.Version)
}
localVersion := *latestReleaseSemVer
localVersion.Patch = 0
localVersion.Minor++
return localVersion.String(), nil
}
func eksaVersionForReleaseBranch(branch string) (string, error) {
semVer, err := semverForReleaseBranch(branch)
if err != nil {
return "", err
}
releases, err := prodReleases()
if err != nil {
return "", err
}
var latestReleaseSemVer *semver.Version
latestRelease := GetLatestPatchRelease(releases, semVer)
if latestRelease != nil {
latestReleaseSemVer, err = semver.New(latestRelease.Version)
if err != nil {
return "", errors.Wrapf(err, "parsing version for release %s", latestRelease.Version)
}
localVersion := *latestReleaseSemVer
localVersion.Patch++
latestReleaseSemVer = &localVersion
} else {
// if no patch version for the release branch, this is an unreleased minor version
// so the next version will be x.x.0
latestReleaseSemVer = semVer
}
return latestReleaseSemVer.String(), nil
}
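// nextPatch illustrates the patch bump above on a concrete value (a sketch;
// the input version is hypothetical): for 0.16.2 it returns "0.16.3".
func nextPatch(v *semver.Version) string {
next := *v
next.Patch++
return next.String()
}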
func getLatestDevRelease() (*releasev1alpha1.EksARelease, error) {
releases, err := devReleases()
if err != nil {
return nil, err
}
return latestRelease(releases)
}
func GetLatestMinorReleaseBinaryFromMain() (binaryPath string, err error) {
return getBinaryFromRelease(GetLatestMinorReleaseFromMain())
}
func GetLatestMinorReleaseFromMain() (*releasev1alpha1.EksARelease, error) {
releases, err := prodReleases()
if err != nil {
return nil, err
}
return latestRelease(releases)
}
func semverForReleaseBranch(branch string) (*semver.Version, error) {
majorMinor := getMajorMinorFromTestBranch(branch)
testBranchFirstVersion := fmt.Sprintf("%s.0", majorMinor)
testBranchFirstSemver, err := semver.New(testBranchFirstVersion)
if err != nil {
return nil, fmt.Errorf("can't extract semver from release branch [%s]: %v", branch, err)
}
return testBranchFirstSemver, nil
}
func latestRelease(releases *releasev1alpha1.Release) (*releasev1alpha1.EksARelease, error) {
var latestRelease *releasev1alpha1.EksARelease
for _, release := range releases.Spec.Releases {
if release.Version == releases.Spec.LatestVersion {
latestRelease = &release
break
}
}
if latestRelease == nil {
return nil, fmt.Errorf("releases manifest doesn't contain latest release %s", releases.Spec.LatestVersion)
}
return latestRelease, nil
}
// GetPreviousMinorReleaseFromVersion returns the newest release that is both
// older than version and belongs to a different (earlier) minor version, i.e.
// the latest patch release of the previous minor release.
func GetPreviousMinorReleaseFromVersion(version *semver.Version) (*releasev1alpha1.EksARelease, error) {
releases, err := prodReleases()
if err != nil {
return nil, err
}
release, err := getLatestPrevMinorRelease(releases, version)
if err != nil {
return nil, err
}
return release, nil
}
func GetReleaseBinaryFromVersion(version *semver.Version) (binaryPath string, err error) {
releases, err := prodReleases()
if err != nil {
return "", err
}
var targetVersion *releasev1alpha1.EksARelease
for _, release := range releases.Spec.Releases {
release := release
// Compare by value: newVersion returns a fresh pointer, so a pointer
// comparison would never match.
releaseVersion := newVersion(release.Version)
if releaseVersion.SameMajor(version) && releaseVersion.SameMinor(version) && releaseVersion.Patch == version.Patch {
targetVersion = &release
}
}
if targetVersion == nil {
return "", fmt.Errorf("no release found in the releases manifest for version %s", version.String())
}
binaryPath, err = getBinary(targetVersion)
if err != nil {
return "", fmt.Errorf("failed getting binary for specified version %s: %s", version.String(), err)
}
return binaryPath, nil
}
// NewEKSAReleasePackagedBinary builds a new EKSAReleasePackagedBinary.
func NewEKSAReleasePackagedBinary(release *releasev1alpha1.EksARelease) *EKSAReleasePackagedBinary {
return &EKSAReleasePackagedBinary{release}
}
// EKSAReleasePackagedBinary decorates an EKSA release with extra functionality.
type EKSAReleasePackagedBinary struct {
*releasev1alpha1.EksARelease
}
// BinaryPath implements EKSAPackagedBinary.
func (b *EKSAReleasePackagedBinary) BinaryPath() (string, error) {
return getBinary(b.EksARelease)
}
// Version returns the eks-a release version.
func (b *EKSAReleasePackagedBinary) Version() string {
return b.EksARelease.Version
}
func getBinary(release *releasev1alpha1.EksARelease) (string, error) {
r := platformAwareRelease{release}
latestReleaseBinaryFolder := filepath.Join("bin", r.Version)
latestReleaseBinaryPath := filepath.Join(latestReleaseBinaryFolder, releaseBinaryName)
if !validations.FileExists(latestReleaseBinaryPath) {
logger.Info("Downloading binary for EKS-A release", r.Version, latestReleaseBinaryPath)
err := os.MkdirAll(latestReleaseBinaryFolder, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed creating directory ./%s: %s", latestReleaseBinaryFolder, err)
}
binaryUri, err := r.binaryUri()
if err != nil {
return "", fmt.Errorf("determining URI for EKS-A binary: %v", err)
}
err = files.GzipFileDownloadExtract(binaryUri, releaseBinaryName, latestReleaseBinaryFolder)
if err != nil {
return "", fmt.Errorf("failed extracting binary for EKS-A release [%s] to path ./%s: %s", r.Version, latestReleaseBinaryPath, err)
}
}
return latestReleaseBinaryPath, nil
}
func getBinaryFromRelease(release *releasev1alpha1.EksARelease, chainedErr error) (binaryPath string, err error) {
if chainedErr != nil {
return "", err
}
binaryPath, err = getBinary(release)
if err != nil {
return "", fmt.Errorf("failed getting binary for release [%s]: %v", release.Version, err)
}
return binaryPath, nil
}
type platformAwareRelease struct {
*releasev1alpha1.EksARelease
}
func (p *platformAwareRelease) binaryUri() (binaryUri string, err error) {
r := runtime.GOOS
switch r {
case "darwin":
return p.EksABinary.DarwinBinary.URI, nil
case "linux":
return p.EksABinary.LinuxBinary.URI, nil
default:
return "", fmt.Errorf("unsupported runtime %s", r)
}
}
func prodReleases() (release *releasev1alpha1.Release, err error) {
return getReleases(prodReleasesManifest)
}
func devReleases() (release *releasev1alpha1.Release, err error) {
return getReleases(devReleaseURL())
}
func getReleases(url string) (release *releasev1alpha1.Release, err error) {
reader := newFileReader()
logger.Info("Reading release manifest", "manifest", url)
releases, err := releases.ReadReleasesFromURL(reader, url)
if err != nil {
return nil, err
}
return releases, nil
}
func getLatestPrevMinorRelease(releases *releasev1alpha1.Release, releaseBranchVersion *semver.Version) (*releasev1alpha1.EksARelease, error) {
targetRelease := &releasev1alpha1.EksARelease{
Version: "",
BundleManifestUrl: "",
}
latestPrevMinorReleaseVersion := newVersion("0.0.0")
for _, release := range releases.Spec.Releases {
releaseVersion := newVersion(release.Version)
if releaseVersion.LessThan(releaseBranchVersion) && releaseVersion.Minor != releaseBranchVersion.Minor && releaseVersion.GreaterThan(latestPrevMinorReleaseVersion) {
*targetRelease = release
latestPrevMinorReleaseVersion = releaseVersion
}
}
if targetRelease.Version == "" {
return nil, fmt.Errorf("releases manifest doesn't contain a version of the previous minor release")
}
return targetRelease, nil
}
// GetLatestPatchRelease returns the latest patch release for the major.minor of version.
// If releases doesn't contain any release matching that major.minor, it returns nil.
func GetLatestPatchRelease(releases *releasev1alpha1.Release, version *semver.Version) *releasev1alpha1.EksARelease {
var release *releasev1alpha1.EksARelease
current := newVersion("0.0.0")
for _, r := range releases.Spec.Releases {
r := r
rv := newVersion(r.Version)
if rv.SameMajor(version) && rv.SameMinor(version) && rv.GreaterThan(current) {
release = &r
current = rv
}
}
return release
}
// GetLatestProductionPatchRelease retrieves the latest patch release for version from the
// production release manifest. If the production release manifest does not contain a release for
// the major.minor of version it errors.
func GetLatestProductionPatchRelease(version *semver.Version) (*releasev1alpha1.EksARelease, error) {
releases, err := prodReleases()
if err != nil {
return nil, err
}
release := GetLatestPatchRelease(releases, version)
if release == nil {
return nil, fmt.Errorf("no release found in the production release bundle for %v", version)
}
return release, nil
}
func getMajorMinorFromTestBranch(testBranch string) string {
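// For example, "release-0.16" maps to "0.16", from which
// semverForReleaseBranch builds and parses "0.16.0".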
return strings.TrimPrefix(testBranch, "release-")
}
func devReleaseURL() string {
testBranch := testBranch()
if testBranch == "main" {
return "https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/eks-a-release.yaml"
}
return fmt.Sprintf("https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/%s/eks-a-release.yaml", testBranch)
}
func testBranch() string {
return getEnvWithDefault(BranchNameEnvVar, defaultTestBranch)
}
| 359 |
eks-anywhere | aws | Go | package framework
import (
"fmt"
"os"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"github.com/aws/eks-anywhere/internal/pkg/api"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
)
const (
snowAMIIDUbuntu123 = "T_SNOW_AMIID_UBUNTU_1_23"
snowAMIIDUbuntu124 = "T_SNOW_AMIID_UBUNTU_1_24"
snowAMIIDUbuntu125 = "T_SNOW_AMIID_UBUNTU_1_25"
snowAMIIDUbuntu126 = "T_SNOW_AMIID_UBUNTU_1_26"
snowAMIIDUbuntu127 = "T_SNOW_AMIID_UBUNTU_1_27"
snowDevices = "T_SNOW_DEVICES"
snowControlPlaneCidr = "T_SNOW_CONTROL_PLANE_CIDR"
snowPodCidr = "T_SNOW_POD_CIDR"
snowCredentialsFile = "EKSA_AWS_CREDENTIALS_FILE"
snowCertificatesFile = "EKSA_AWS_CA_BUNDLES_FILE"
snowIPPoolIPStart = "T_SNOW_IPPOOL_IPSTART"
snowIPPoolIPEnd = "T_SNOW_IPPOOL_IPEND"
snowIPPoolGateway = "T_SNOW_IPPOOL_GATEWAY"
snowIPPoolSubnet = "T_SNOW_IPPOOL_SUBNET"
snowEc2TagPrefix = "sigs.k8s.io/cluster-api-provider-aws-snow/cluster/"
)
var requiredSnowEnvVars = []string{
snowDevices,
snowControlPlaneCidr,
snowCredentialsFile,
snowCertificatesFile,
}
type Snow struct {
t *testing.T
fillers []api.SnowFiller
clusterFillers []api.ClusterFiller
cpCidr string
podCidr string
}
type SnowOpt func(*Snow)
func NewSnow(t *testing.T, opts ...SnowOpt) *Snow {
checkRequiredEnvVars(t, requiredSnowEnvVars)
s := &Snow{
t: t,
}
s.cpCidr = os.Getenv(snowControlPlaneCidr)
s.podCidr = os.Getenv(snowPodCidr)
for _, opt := range opts {
opt(s)
}
return s
}
func (s *Snow) Name() string {
return "snow"
}
func (s *Snow) Setup() {}
// UpdateKubeConfig customizes generated kubeconfig for the provider.
func (s *Snow) UpdateKubeConfig(content *[]byte, clusterName string) error {
return nil
}
// ClusterConfigUpdates satisfies the test framework Provider.
func (s *Snow) ClusterConfigUpdates() []api.ClusterConfigFiller {
s.t.Logf("Searching for free IP for Snow Control Plane in CIDR %s", s.cpCidr)
ip, err := GenerateUniqueIp(s.cpCidr)
if err != nil {
s.t.Fatalf("failed to generate control plane ip for snow [cidr=%s]: %v", s.cpCidr, err)
}
s.t.Logf("Selected IP %s for Snow Control Plane", ip)
f := make([]api.ClusterFiller, 0, len(s.clusterFillers)+2)
f = append(f, s.clusterFillers...)
f = append(f, api.WithControlPlaneEndpointIP(ip))
if s.podCidr != "" {
f = append(f, api.WithPodCidr(s.podCidr))
}
return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.SnowToConfigFiller(s.fillers...)}
}
// CleanupVMs satisfies the test framework Provider.
func (s *Snow) CleanupVMs(clusterName string) error {
snowDeviceIPs := strings.Split(os.Getenv(snowDevices), ",")
s.t.Logf("Cleaning ec2 instances of %s in snow devices: %v", clusterName, snowDeviceIPs)
var res []error
for _, ip := range snowDeviceIPs {
sess, err := newSession(ip)
if err != nil {
res = append(res, fmt.Errorf("Cannot create session to snow device: %w", err))
continue
}
ec2Client := ec2.New(sess)
// snow devices don't yet support DescribeInstances filters, so filter client-side below
out, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{})
if err != nil {
res = append(res, fmt.Errorf("Cannot get ec2 instances from snow device: %w", err))
continue
}
var ownedInstanceIds []*string
for _, reservation := range out.Reservations {
for _, instance := range reservation.Instances {
if isNotTerminatedAndHasTag(instance, snowEc2TagPrefix+clusterName) {
ownedInstanceIds = append(ownedInstanceIds, instance.InstanceId)
}
}
}
if len(ownedInstanceIds) != 0 {
if _, err = ec2Client.TerminateInstances(&ec2.TerminateInstancesInput{
InstanceIds: ownedInstanceIds,
}); err != nil {
res = append(res, fmt.Errorf("Cannot terminate ec2 instances from snow device: %w", err))
} else {
s.t.Logf("Cluster %s EC2 instances have been cleaned from device %s: %+v", clusterName, ip, ownedInstanceIds)
}
} else {
s.t.Logf("No EC2 instances to cleanup for snow device: %s", ip)
}
cleanedKeys, err := cleanupKeypairs(ec2Client, clusterName)
if err != nil {
res = append(res, err)
} else {
s.t.Logf("KeyPairs has been cleaned: %+v", cleanedKeys)
}
}
return kerrors.NewAggregate(res)
}
func cleanupKeypairs(ec2Client *ec2.EC2, clusterName string) ([]*string, error) {
out, err := ec2Client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{})
if err != nil {
return nil, err
}
var keyPairNames []*string
for _, keyPair := range out.KeyPairs {
if strings.Contains(*keyPair.KeyName, clusterName) {
keyPairNames = append(keyPairNames, keyPair.KeyName)
}
}
var errs []error
for _, keyPairName := range keyPairNames {
if _, err := ec2Client.DeleteKeyPair(&ec2.DeleteKeyPairInput{
KeyName: keyPairName,
}); err != nil {
errs = append(errs, err)
}
}
return keyPairNames, kerrors.NewAggregate(errs)
}
func isNotTerminatedAndHasTag(instance *ec2.Instance, tag string) bool {
if *instance.State.Name == "terminated" {
return false
}
for _, t := range instance.Tags {
if *t.Key == tag {
return true
}
}
return false
}
func newSession(ip string) (*session.Session, error) {
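// The credentials file (snowCredentialsFile env var) is a standard AWS
// shared-credentials file with one profile per device IP; an illustrative
// layout:
//
//	[192.168.1.123]
//	aws_access_key_id = <access-key>
//	aws_secret_access_key = <secret-key>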
sess, err := session.NewSession(&aws.Config{
Endpoint: aws.String("http://" + ip + ":8008"),
Credentials: credentials.NewSharedCredentials(os.Getenv(snowCredentialsFile), ip),
Region: aws.String("snow"),
})
if err != nil {
return nil, fmt.Errorf("Cannot create session to snow device: %v", err)
}
return sess, nil
}
func (s *Snow) WithProviderUpgrade(fillers ...api.SnowFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(api.SnowToConfigFiller(fillers...))
}
}
// WithBottlerocket123 returns a cluster config filler that sets the kubernetes version of the cluster to 1.23
// as well as the right devices and osFamily for all SnowMachineConfigs. It also sets any
// necessary machine config default required for BR, like the container volume size. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithBottlerocket123() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketForKubeVersion(anywherev1.Kube123)
}
// WithBottlerocket124 returns a cluster config filler that sets the kubernetes version of the cluster to 1.24
// as well as the right devices and osFamily for all SnowMachineConfigs. It also sets any
// necessary machine config default required for BR, like the container volume size. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithBottlerocket124() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketForKubeVersion(anywherev1.Kube124)
}
// WithBottlerocket125 returns a cluster config filler that sets the kubernetes version of the cluster to 1.25
// as well as the right devices and osFamily for all SnowMachineConfigs. It also sets any
// necessary machine config default required for BR, like the container volume size. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithBottlerocket125() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketForKubeVersion(anywherev1.Kube125)
}
// WithBottlerocket126 returns a cluster config filler that sets the kubernetes version of the cluster to 1.26
// as well as the right devices and osFamily for all SnowMachineConfigs. It also sets any
// necessary machine config default required for BR, like the container volume size. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithBottlerocket126() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketForKubeVersion(anywherev1.Kube126)
}
// WithBottlerocket127 returns a cluster config filler that sets the kubernetes version of the cluster to 1.27
// as well as the right devices and osFamily for all SnowMachineConfigs. It also sets any
// necessary machine config default required for BR, like the container volume size. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithBottlerocket127() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketForKubeVersion(anywherev1.Kube127)
}
// WithBottlerocketStaticIP123 returns a cluster config filler that sets the kubernetes version of the cluster to 1.23
// as well as the right devices, osFamily and static ip config for all SnowMachineConfigs. Compared to WithBottlerocket123,
// this method also adds a snow ip pool to support static ip configuration.
func (s *Snow) WithBottlerocketStaticIP123() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketStaticIPForKubeVersion(anywherev1.Kube123)
}
// WithBottlerocketStaticIP124 returns a cluster config filler that sets the kubernetes version of the cluster to 1.24
// as well as the right devices, osFamily and static ip config for all SnowMachineConfigs. Compared to WithBottlerocket124,
// this method also adds a snow ip pool to support static ip configuration.
func (s *Snow) WithBottlerocketStaticIP124() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketStaticIPForKubeVersion(anywherev1.Kube124)
}
// WithBottlerocketStaticIP125 returns a cluster config filler that sets the kubernetes version of the cluster to 1.25
// as well as the right devices, osFamily and static ip config for all SnowMachineConfigs. Compared to WithBottlerocket125,
// this method also adds a snow ip pool to support static ip configuration.
func (s *Snow) WithBottlerocketStaticIP125() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketStaticIPForKubeVersion(anywherev1.Kube125)
}
// WithBottlerocketStaticIP126 returns a cluster config filler that sets the kubernetes version of the cluster to 1.26
// as well as the right devices, osFamily and static ip config for all SnowMachineConfigs. Compared to WithBottlerocket126,
// this method also adds a snow ip pool to support static ip configuration.
func (s *Snow) WithBottlerocketStaticIP126() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketStaticIPForKubeVersion(anywherev1.Kube126)
}
// WithBottlerocketStaticIP127 returns a cluster config filler that sets the kubernetes version of the cluster to 1.27
// as well as the right devices, osFamily and static ip config for all SnowMachineConfigs. Compared to WithBottlerocket127,
// this method also adds a snow ip pool to support static ip configuration.
func (s *Snow) WithBottlerocketStaticIP127() api.ClusterConfigFiller {
s.t.Helper()
return s.withBottlerocketStaticIPForKubeVersion(anywherev1.Kube127)
}
// WithUbuntu123 returns a cluster config filler that sets the kubernetes version of the cluster to 1.23
// as well as the right devices and osFamily for all SnowMachineConfigs. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithUbuntu123() api.ClusterConfigFiller {
s.t.Helper()
return s.WithKubeVersionAndOS(anywherev1.Ubuntu, anywherev1.Kube123)
}
// WithUbuntu124 returns a cluster config filler that sets the kubernetes version of the cluster to 1.24
// as well as the right devices and osFamily for all SnowMachineConfigs. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithUbuntu124() api.ClusterConfigFiller {
s.t.Helper()
return s.WithKubeVersionAndOS(anywherev1.Ubuntu, anywherev1.Kube124)
}
// WithUbuntu125 returns a cluster config filler that sets the kubernetes version of the cluster to 1.25
// as well as the right devices and osFamily for all SnowMachineConfigs. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithUbuntu125() api.ClusterConfigFiller {
s.t.Helper()
return s.WithKubeVersionAndOS(anywherev1.Ubuntu, anywherev1.Kube125)
}
// WithUbuntu126 returns a cluster config filler that sets the kubernetes version of the cluster to 1.26
// as well as the right devices and osFamily for all SnowMachineConfigs. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithUbuntu126() api.ClusterConfigFiller {
s.t.Helper()
return s.WithKubeVersionAndOS(anywherev1.Ubuntu, anywherev1.Kube126)
}
// WithUbuntu127 returns a cluster config filler that sets the kubernetes version of the cluster to 1.27
// as well as the right devices and osFamily for all SnowMachineConfigs. If the env var is set, this will
// also set the AMI ID. Otherwise, it will leave it empty and let CAPAS select one.
func (s *Snow) WithUbuntu127() api.ClusterConfigFiller {
s.t.Helper()
return s.WithKubeVersionAndOS(anywherev1.Ubuntu, anywherev1.Kube127)
}
func (s *Snow) withBottlerocketForKubeVersion(kubeVersion anywherev1.KubernetesVersion) api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
s.WithKubeVersionAndOS(anywherev1.Bottlerocket, kubeVersion),
api.SnowToConfigFiller(api.WithChangeForAllSnowMachines(api.WithSnowContainersVolumeSize(100))),
)
}
func (s *Snow) withBottlerocketStaticIPForKubeVersion(kubeVersion anywherev1.KubernetesVersion) api.ClusterConfigFiller {
poolName := "pool-1"
return api.JoinClusterConfigFillers(
s.WithKubeVersionAndOS(anywherev1.Bottlerocket, kubeVersion),
api.SnowToConfigFiller(api.WithChangeForAllSnowMachines(api.WithSnowContainersVolumeSize(100))),
api.SnowToConfigFiller(api.WithChangeForAllSnowMachines(api.WithStaticIP(poolName))),
api.SnowToConfigFiller(s.withIPPoolFromEnvVar(poolName)),
)
}
// WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the correct AMI ID
// and devices for the Snow machine configs.
func (s *Snow) WithKubeVersionAndOS(osFamily anywherev1.OSFamily, kubeVersion anywherev1.KubernetesVersion) api.ClusterConfigFiller {
envar := fmt.Sprintf("T_SNOW_AMIID_%s_%s", strings.ToUpper(string(osFamily)), strings.ReplaceAll(string(kubeVersion), ".", "_"))
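// For example, (Ubuntu, Kube127) yields "T_SNOW_AMIID_UBUNTU_1_27",
// matching the snowAMIIDUbuntu127 constant declared above.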
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)),
api.SnowToConfigFiller(
s.withAMIIDFromEnvVar(envar),
api.WithSnowStringFromEnvVar(snowDevices, api.WithSnowDevicesForAllMachines),
api.WithOsFamilyForAllSnowMachines(osFamily),
),
)
}
func (s *Snow) withAMIIDFromEnvVar(envvar string) api.SnowFiller {
val, ok := os.LookupEnv(envvar)
if !ok {
s.t.Log("% for Snow AMI ID is not set, leaving amiID empty which will let CAPAS select the right AMI from the ones available in the device", envvar)
val = ""
}
return api.WithSnowAMIIDForAllMachines(val)
}
func (s *Snow) withIPPoolFromEnvVar(name string) api.SnowFiller {
envVars := []string{snowIPPoolIPStart, snowIPPoolIPEnd, snowIPPoolGateway, snowIPPoolSubnet}
checkRequiredEnvVars(s.t, envVars)
return api.WithSnowIPPool(name, os.Getenv(snowIPPoolIPStart), os.Getenv(snowIPPoolIPEnd), os.Getenv(snowIPPoolGateway), os.Getenv(snowIPPoolSubnet))
}
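// WithSnowUbuntu123 returns SnowOpt that sets the right devices and osFamily for all SnowMachineConfigs.
// If the env var is set, this will also set the AMI ID.
// Otherwise, it will leave it empty and let CAPAS select one.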
func WithSnowUbuntu123() SnowOpt {
return func(s *Snow) {
s.fillers = append(s.fillers,
api.WithSnowStringFromEnvVar(snowAMIIDUbuntu123, api.WithSnowAMIIDForAllMachines),
api.WithSnowStringFromEnvVar(snowDevices, api.WithSnowDevicesForAllMachines),
api.WithOsFamilyForAllSnowMachines(anywherev1.Ubuntu),
)
}
}
// WithSnowUbuntu124 returns SnowOpt that sets the right devices and osFamily for all SnowMachineConfigs.
// If the env var is set, this will also set the AMI ID.
// Otherwise, it will leave it empty and let CAPAS select one.
func WithSnowUbuntu124() SnowOpt {
return func(s *Snow) {
s.fillers = append(s.fillers,
s.withAMIIDFromEnvVar(snowAMIIDUbuntu124),
api.WithSnowStringFromEnvVar(snowDevices, api.WithSnowDevicesForAllMachines),
api.WithOsFamilyForAllSnowMachines(anywherev1.Ubuntu),
)
}
}
// WithSnowUbuntu125 returns SnowOpt that sets the right devices and osFamily for all SnowMachineConfigs.
// If the env var is set, this will also set the AMI ID.
// Otherwise, it will leave it empty and let CAPAS select one.
func WithSnowUbuntu125() SnowOpt {
return func(s *Snow) {
s.fillers = append(s.fillers,
api.WithSnowStringFromEnvVar(snowAMIIDUbuntu125, api.WithSnowAMIIDForAllMachines),
api.WithSnowStringFromEnvVar(snowDevices, api.WithSnowDevicesForAllMachines),
api.WithOsFamilyForAllSnowMachines(anywherev1.Ubuntu),
)
}
}
// WithSnowUbuntu126 returns SnowOpt that sets the right devices and osFamily for all SnowMachineConfigs.
// If the env var is set, this will also set the AMI ID.
// Otherwise, it will leave it empty and let CAPAS select one.
func WithSnowUbuntu126() SnowOpt {
return func(s *Snow) {
s.fillers = append(s.fillers,
api.WithSnowStringFromEnvVar(snowAMIIDUbuntu126, api.WithSnowAMIIDForAllMachines),
api.WithSnowStringFromEnvVar(snowDevices, api.WithSnowDevicesForAllMachines),
api.WithOsFamilyForAllSnowMachines(anywherev1.Ubuntu),
)
}
}
// WithSnowUbuntu127 returns SnowOpt that sets the right devices and osFamily for all SnowMachineConfigs.
// If the env var is set, this will also set the AMI ID.
// Otherwise, it will leave it empty and let CAPAS select one.
func WithSnowUbuntu127() SnowOpt {
return func(s *Snow) {
s.fillers = append(s.fillers,
api.WithSnowStringFromEnvVar(snowAMIIDUbuntu127, api.WithSnowAMIIDForAllMachines),
api.WithSnowStringFromEnvVar(snowDevices, api.WithSnowDevicesForAllMachines),
api.WithOsFamilyForAllSnowMachines(anywherev1.Ubuntu),
)
}
}
// WithSnowWorkerNodeGroup stores the necessary fillers to update/create the provided worker node group with its corresponding SnowMachineConfig
// and apply the given changes to that machine config.
func WithSnowWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, fillers ...api.SnowMachineConfigFiller) SnowOpt {
return func(s *Snow) {
s.fillers = append(s.fillers, snowMachineConfig(name, fillers...))
s.clusterFillers = append(s.clusterFillers, buildSnowWorkerNodeGroupClusterFiller(name, workerNodeGroup))
}
}
// WithWorkerNodeGroup returns a filler that updates/creates the provided worker node group with its corresponding SnowMachineConfig
// and applies the given changes to that machine config.
func (s *Snow) WithWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, fillers ...api.SnowMachineConfigFiller) api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(buildSnowWorkerNodeGroupClusterFiller(name, workerNodeGroup)),
api.SnowToConfigFiller(snowMachineConfig(name, fillers...)),
)
}
// WithNewWorkerNodeGroup returns a filler that updates/creates the provided worker node group with its corresponding SnowMachineConfig.
func (s *Snow) WithNewWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller {
return api.JoinClusterConfigFillers(
api.ClusterToConfigFiller(buildSnowWorkerNodeGroupClusterFiller(name, workerNodeGroup)),
api.SnowToConfigFiller(snowMachineConfig(name)),
)
}
// WithNewSnowWorkerNodeGroup updates the test cluster Config with the fillers for a specific snow worker node group.
// It applies the fillers in WorkerNodeGroup to the named worker node group and the ones for the corresponding SnowMachineConfig.
func (s *Snow) WithNewSnowWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, fillers ...api.SnowMachineConfigFiller) ClusterE2ETestOpt {
return func(e *ClusterE2ETest) {
e.UpdateClusterConfig(
api.SnowToConfigFiller(snowMachineConfig(name, fillers...)),
api.ClusterToConfigFiller(buildSnowWorkerNodeGroupClusterFiller(name, workerNodeGroup)),
)
}
}
func snowMachineConfig(name string, fillers ...api.SnowMachineConfigFiller) api.SnowFiller {
f := make([]api.SnowMachineConfigFiller, 0, len(fillers)+2)
f = append(f,
api.WithSnowMachineDefaultValues(),
api.WithSnowDevices(os.Getenv(snowDevices)),
)
f = append(f, fillers...)
return api.WithSnowMachineConfig(name, f...)
}
func buildSnowWorkerNodeGroupClusterFiller(machineConfigName string, workerNodeGroup *WorkerNodeGroup) api.ClusterFiller {
workerNodeGroup.MachineConfigKind = anywherev1.SnowMachineConfigKind
workerNodeGroup.MachineConfigName = machineConfigName
return workerNodeGroup.ClusterFiller()
}
func UpdateSnowUbuntuTemplate123Var() api.SnowFiller {
return api.WithSnowStringFromEnvVar(snowAMIIDUbuntu123, api.WithSnowAMIIDForAllMachines)
}
// ClusterStateValidations returns a list of provider specific validations.
func (s *Snow) ClusterStateValidations() []clusterf.StateValidation {
return []clusterf.StateValidation{}
}
| 510 |
eks-anywhere | aws | Go | package framework
import (
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/logger"
)
const ownerAnnotation = "cluster.x-k8s.io/owner-name"
// ValidateControlPlaneTaints will validate that a controlPlane node has the expected taints.
func ValidateControlPlaneTaints(controlPlane v1alpha1.ControlPlaneConfiguration, node corev1.Node) (err error) {
if err := api.ValidateControlPlaneTaints(controlPlane, node); err != nil {
return err
}
logger.V(4).Info("Expected taints from cluster spec control plane configuration are present on corresponding node", "node", node.Name, "node taints", node.Spec.Taints, "control plane configuration taints", controlPlane.Taints)
return nil
}
// ValidateControlPlaneNoTaints will validate that a controlPlane has no taints, for example in the case of a single node cluster.
func ValidateControlPlaneNoTaints(controlPlane v1alpha1.ControlPlaneConfiguration, node corev1.Node) (err error) {
if err := api.ValidateControlPlaneNoTaints(controlPlane, node); err != nil {
return err
}
logger.V(4).Info("expected no taints on cluster spec control plane configuration and on corresponding node", "node", node.Name, "node taints", node.Spec.Taints, "control plane configuration taints", controlPlane.Taints)
return nil
}
// ValidateWorkerNodeTaints will validate that a worker node has the expected taints in the worker node group configuration.
func ValidateWorkerNodeTaints(w v1alpha1.WorkerNodeGroupConfiguration, node corev1.Node) (err error) {
if err := api.ValidateWorkerNodeTaints(w, node); err != nil {
return err
}
logger.V(4).Info("expected taints from cluster spec are present on corresponding node", "worker node group", w.Name, "worker node group taints", w.Taints, "node", node.Name, "node taints", node.Spec.Taints)
return nil
}
func NoExecuteTaint() corev1.Taint {
return corev1.Taint{
Key: "key1",
Value: "value1",
Effect: corev1.TaintEffectNoExecute,
}
}
func NoScheduleTaint() corev1.Taint {
return corev1.Taint{
Key: "key1",
Value: "value1",
Effect: corev1.TaintEffectNoSchedule,
}
}
func PreferNoScheduleTaint() corev1.Taint {
return corev1.Taint{
Key: "key1",
Value: "value1",
Effect: corev1.TaintEffectPreferNoSchedule,
}
}
func NoScheduleWorkerNodeGroup(name string, count int) *WorkerNodeGroup {
return WithWorkerNodeGroup(name, api.WithCount(count), api.WithTaint(NoScheduleTaint()))
}
func PreferNoScheduleWorkerNodeGroup(name string, count int) *WorkerNodeGroup {
return WithWorkerNodeGroup(name, api.WithCount(count), api.WithTaint(PreferNoScheduleTaint()))
}
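// A minimal sketch composing the helpers above (the group name and count are
// hypothetical):
func exampleNoExecuteWorkerNodeGroup() *WorkerNodeGroup {
return WithWorkerNodeGroup("worker-1", api.WithCount(1), api.WithTaint(NoExecuteTaint()))
}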
| 71 |
eks-anywhere | aws | Go | package framework
import (
"context"
"os"
"testing"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// providerTemplateNameGenerator is an interface for getting template name for each provider.
type providerTemplateNameGenerator interface {
envVarForTemplate(osFamilyStr, eksDName string) string
defaultNameForTemplate(osFamilyStr, eksDName string) string
searchTemplate(ctx context.Context, template string) (string, error)
defaultEnvVarForTemplate(osFamilyStr string, kubeVersion anywherev1.KubernetesVersion) string
}
// templateRegistry caches resolved template names, keyed by template env var name.
type templateRegistry struct {
cache map[string]string
generator providerTemplateNameGenerator
}
// templateForRelease tries to find a suitable template for a particular eks-a release, k8s version and OS family.
// It follows these steps:
//
// 1. Look for explicit configuration through an env var: "T_{provider}_TEMPLATE_{osFamily}_{eks-d version}"
// eg. T_CLOUDSTACK_TEMPLATE_REDHAT_KUBERNETES_1_23_EKS_22, T_VSPHERE_TEMPLATE_REDHAT_KUBERNETES_1_23_EKS_22
// This should be used for explicit configuration, mostly in local development for overrides.
//
// 2. If not present, look for a template with the default name: "{eks-d version}-{osFamily}"
// eg. kubernetes-1-23-eks-22-redhat (CloudStack), /SDDC-Datacenter/vm/Templates/kubernetes-1-23-eks-22-redhat (vSphere)
// This is what should be used most of the time in CI: the explicit configuration is not present, but the right template has already been
// imported to the provider.
//
// 3. If the template doesn't exist, default to the value of the default template env vars: eg. "T_CLOUDSTACK_TEMPLATE_REDHAT_1_23".
// This is a catch-all condition, mostly for edge cases where the bundle has been updated with a new eks-d version but the
// new template hasn't been imported yet. It also preserves backwards compatibility.
func (tc *templateRegistry) templateForRelease(t *testing.T, osFamily anywherev1.OSFamily, release *releasev1.EksARelease, kubeVersion anywherev1.KubernetesVersion) string {
t.Helper()
osFamilyStr := string(osFamily)
versionsBundle := readVersionsBundles(t, release, kubeVersion)
eksDName := versionsBundle.EksD.Name
templateEnvVarName := tc.generator.envVarForTemplate(osFamilyStr, eksDName)
cacheKey := templateEnvVarName
if template, ok := tc.cache[cacheKey]; ok {
t.Logf("Template for release found in cache, using %s template.", template)
return template
}
template, ok := os.LookupEnv(templateEnvVarName)
if ok && template != "" {
t.Logf("Env var %s is set, using %s template", templateEnvVarName, template)
tc.cache[cacheKey] = template
return template
}
t.Logf("Env var %s not is set, trying default generated template name", templateEnvVarName)
// Env var is not set, try default template name
template = tc.generator.defaultNameForTemplate(osFamilyStr, eksDName)
if template != "" {
foundTemplate, err := tc.generator.searchTemplate(context.Background(), template)
if err != nil {
t.Logf("Failed checking if default template exists: %v", err)
}
if foundTemplate != "" {
t.Logf("Default template for release exists, using %s template.", template)
tc.cache[cacheKey] = template
return template
}
t.Logf("Default template %s for release doesn't exit.", template)
}
// Default template doesn't exist, try legacy generic env var
// It is not guaranteed that this template will work for the given release: if they don't match the
// same eks-d release, the test will fail. This is just a catch-all last try for cases where the new template
// hasn't been imported with its own name but the default one matches the same eks-d release.
templateEnvVarName = tc.generator.defaultEnvVarForTemplate(osFamilyStr, kubeVersion)
template, ok = os.LookupEnv(templateEnvVarName)
if !ok || template == "" {
t.Fatalf("Env var %s for default template is not set, can't determine which template to use", templateEnvVarName)
}
t.Logf("Env var %s is set, using %s template. There are no guarantees this template will be valid. Cluster validation might fail.", templateEnvVarName, template)
tc.cache[cacheKey] = template
return template
}
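// A minimal sketch of wiring a registry (the generator value is an assumed
// provider implementation, not defined in this file):
func exampleTemplateLookup(t *testing.T, gen providerTemplateNameGenerator, release *releasev1.EksARelease) string {
registry := &templateRegistry{cache: map[string]string{}, generator: gen}
return registry.templateForRelease(t, anywherev1.Ubuntu, release, anywherev1.Kube127)
}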
| 91 |