repo_name | repo_creator | programming_language | code | num_lines
---|---|---|---|---|
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
type listImagesOptions struct {
fileName string
bundlesOverride string
}
var lio = &listImagesOptions{}
func init() {
listCmd.AddCommand(listImagesCommand)
listImagesCommand.Flags().StringVarP(&lio.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
listImagesCommand.Flags().StringVarP(&lio.bundlesOverride, "bundles-override", "", "", "Override default Bundles manifest (not recommended)")
}
var listImagesCommand = &cobra.Command{
Use: "images",
Short: "Generate a list of images used by EKS Anywhere",
Long: "This command is used to generate a list of images used by EKS-Anywhere for cluster provisioning",
PreRunE: func(cmd *cobra.Command, args []string) error {
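// Bind each flag into viper so its value can also be read through viper lookups.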
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
if err := viper.BindPFlag(flag.Name, flag); err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
return nil
},
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
return listImages(cmd.Context(), lio.fileName, lio.bundlesOverride)
},
}
func listImages(ctx context.Context, clusterSpecPath, bundlesOverride string) error {
images, err := getImages(clusterSpecPath, bundlesOverride)
if err != nil {
return err
}
for _, image := range images {
if image.ImageDigest != "" {
fmt.Printf("%s@%s\n", image.URI, image.ImageDigest)
} else {
fmt.Printf("%s\n", image.URI)
}
}
return nil
}
| 60 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"sigs.k8s.io/yaml"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/version"
)
type listOvasOptions struct {
fileName string
bundlesOverride string
}
type listOvasOutput struct {
URI string
SHA256 string
SHA512 string
}
var listOvaOpts = &listOvasOptions{}
func init() {
listCmd.AddCommand(listOvasCmd)
listOvasCmd.Flags().StringVarP(&listOvaOpts.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
listOvasCmd.Flags().StringVarP(&listOvaOpts.bundlesOverride, "bundles-override", "", "", "Override default Bundles manifest (not recommended)")
err := listOvasCmd.MarkFlagRequired("filename")
if err != nil {
log.Fatalf("Error marking filename flag as required: %v", err)
}
}
var listOvasCmd = &cobra.Command{
Use: "ovas",
Short: "List the OVAs that are supported by current version of EKS Anywhere",
Long: "This command is used to list the vSphere OVAs from the EKS Anywhere bundle manifest for the current version of the EKS Anywhere CLI",
PreRunE: preRunListOvasCmd,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := listOvas(cmd.Context(), listOvaOpts.fileName, listOvaOpts.bundlesOverride); err != nil {
return err
}
return nil
},
}
func listOvas(ctx context.Context, clusterSpecPath, bundlesOverride string) error {
var specOpts []cluster.FileSpecBuilderOpt
if bundlesOverride != "" {
specOpts = append(specOpts, cluster.WithOverrideBundlesManifest(bundlesOverride))
}
clusterSpec, err := readAndValidateClusterSpec(clusterSpecPath, version.Get(), specOpts...)
if err != nil {
return err
}
bundle := clusterSpec.VersionsBundle
titler := cases.Title(language.English)
for _, ova := range bundle.Ovas() {
if strings.Contains(ova.URI, string(eksav1alpha1.Bottlerocket)) {
fmt.Printf("%s:\n", titler.String(string(eksav1alpha1.Bottlerocket)))
} else {
fmt.Printf("%s:\n", titler.String(string(eksav1alpha1.Ubuntu)))
}
output := listOvasOutput{
URI: ova.URI,
SHA256: ova.SHA256,
SHA512: ova.SHA512,
}
yamlOutput, err := yaml.Marshal(output)
if err != nil {
return err
}
fmt.Println(yamlIndent(2, string(yamlOutput)))
}
return nil
}
func preRunListOvasCmd(cmd *cobra.Command, args []string) error {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
return nil
}
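// yamlIndent prefixes each line of yamlString with level spaces so the YAML
// block nests under the header printed above it. For example (illustrative),
// yamlIndent(2, "URI: foo\n") returns "  URI: foo\n  " (note the trailing
// indent after the final newline).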
func yamlIndent(level int, yamlString string) string {
indentation := strings.Repeat(" ", level)
indentedString := fmt.Sprintf("%s%s", indentation, strings.ReplaceAll(yamlString, "\n", "\n"+indentation))
return indentedString
}
| 107 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
type listPackagesOption struct {
kubeVersion string
clusterName string
registry string
// kubeConfig is an optional kubeconfig file to use when querying an
// existing cluster.
kubeConfig string
bundlesOverride string
}
var lpo = &listPackagesOption{}
func init() {
listCmd.AddCommand(listPackagesCommand)
listPackagesCommand.Flags().StringVar(&lpo.kubeVersion, "kube-version", "",
"Kubernetes version <major>.<minor> of the packages to list, for example: \"1.23\".")
listPackagesCommand.Flags().StringVar(&lpo.registry, "registry", "",
"Specifies an alternative registry for packages discovery.")
listPackagesCommand.Flags().StringVar(&lpo.kubeConfig, "kubeconfig", "",
"Path to a kubeconfig file to use when source is a cluster.")
listPackagesCommand.Flags().StringVar(&lpo.clusterName, "cluster", "",
"Name of cluster for package list.")
listPackagesCommand.Flags().StringVar(&lpo.bundlesOverride, "bundles-override", "",
"Override default Bundles manifest (not recommended)")
}
var listPackagesCommand = &cobra.Command{
Use: "packages",
Short: "Lists curated packages available to install",
PreRunE: preRunPackages,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := curatedpackages.ValidateKubeVersion(lpo.kubeVersion, lpo.clusterName); err != nil {
return err
}
if err := listPackages(cmd.Context()); err != nil {
return err
}
return nil
},
}
func listPackages(ctx context.Context) error {
kubeConfig, err := kubeconfig.ResolveAndValidateFilename(lpo.kubeConfig, "")
if err != nil {
return err
}
deps, err := NewDependenciesForPackages(ctx, WithRegistryName(lpo.registry), WithKubeVersion(lpo.kubeVersion), WithMountPaths(kubeConfig), WithBundlesOverride(lpo.bundlesOverride))
if err != nil {
return fmt.Errorf("unable to initialize executables: %v", err)
}
bm := curatedpackages.CreateBundleManager(deps.Logger)
b := curatedpackages.NewBundleReader(kubeConfig, lpo.clusterName, deps.Kubectl, bm, deps.BundleRegistry)
bundle, err := b.GetLatestBundle(ctx, lpo.kubeVersion)
if err != nil {
return err
}
packages := curatedpackages.NewPackageClient(
deps.Kubectl,
curatedpackages.WithBundle(bundle),
)
return packages.DisplayPackages(os.Stdout)
}
| 82 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/spf13/pflag"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermanager"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/version"
)
const defaultTinkerbellNodeStartupTimeout = 20 * time.Minute
const timeoutErrorTemplate = "failed to parse timeout %s: %v"
type timeoutOptions struct {
cpWaitTimeout string
externalEtcdWaitTimeout string
perMachineWaitTimeout string
unhealthyMachineTimeout string
nodeStartupTimeout string
noTimeouts bool
}
func applyTimeoutFlags(flagSet *pflag.FlagSet, t *timeoutOptions) {
flagSet.StringVar(&t.cpWaitTimeout, cpWaitTimeoutFlag, clustermanager.DefaultControlPlaneWait.String(), "Override the default control plane wait timeout")
flagSet.StringVar(&t.externalEtcdWaitTimeout, externalEtcdWaitTimeoutFlag, clustermanager.DefaultEtcdWait.String(), "Override the default external etcd wait timeout")
flagSet.StringVar(&t.perMachineWaitTimeout, perMachineWaitTimeoutFlag, clustermanager.DefaultMaxWaitPerMachine.String(), "Override the default machine wait timeout per machine")
flagSet.StringVar(&t.unhealthyMachineTimeout, unhealthyMachineTimeoutFlag, clustermanager.DefaultUnhealthyMachineTimeout.String(), "Override the default unhealthy machine timeout")
flagSet.StringVar(&t.nodeStartupTimeout, nodeStartupTimeoutFlag, clustermanager.DefaultNodeStartupTimeout.String(), "Override the default node startup timeout (Defaults to 20m for Tinkerbell clusters)")
flagSet.BoolVar(&t.noTimeouts, noTimeoutsFlag, false, "Disable timeout for all wait operations")
}
// buildClusterManagerOpts builds options for constructing a ClusterManager from CLI flags.
// datacenterKind is an API kind such as v1alpha1.TinkerbellDatacenterKind.
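//
// A minimal usage sketch with hypothetical flag values (the durations below
// are assumptions for illustration, not defaults from this file):
//
//	t := timeoutOptions{cpWaitTimeout: "1h", externalEtcdWaitTimeout: "1h",
//		perMachineWaitTimeout: "10m", unhealthyMachineTimeout: "5m", nodeStartupTimeout: "10m"}
//	opts, err := buildClusterManagerOpts(t, v1alpha1.TinkerbellDatacenterKind)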
func buildClusterManagerOpts(t timeoutOptions, datacenterKind string) (*dependencies.ClusterManagerTimeoutOptions, error) {
cpWaitTimeout, err := time.ParseDuration(t.cpWaitTimeout)
if err != nil {
return nil, fmt.Errorf(timeoutErrorTemplate, cpWaitTimeoutFlag, err)
}
externalEtcdWaitTimeout, err := time.ParseDuration(t.externalEtcdWaitTimeout)
if err != nil {
return nil, fmt.Errorf(timeoutErrorTemplate, externalEtcdWaitTimeoutFlag, err)
}
perMachineWaitTimeout, err := time.ParseDuration(t.perMachineWaitTimeout)
if err != nil {
return nil, fmt.Errorf(timeoutErrorTemplate, perMachineWaitTimeoutFlag, err)
}
unhealthyMachineTimeout, err := time.ParseDuration(t.unhealthyMachineTimeout)
if err != nil {
return nil, fmt.Errorf(timeoutErrorTemplate, unhealthyMachineTimeoutFlag, err)
}
if t.nodeStartupTimeout == clustermanager.DefaultNodeStartupTimeout.String() &&
datacenterKind == v1alpha1.TinkerbellDatacenterKind {
t.nodeStartupTimeout = defaultTinkerbellNodeStartupTimeout.String()
}
nodeStartupTimeout, err := time.ParseDuration(t.nodeStartupTimeout)
if err != nil {
return nil, fmt.Errorf(timeoutErrorTemplate, nodeStartupTimeoutFlag, err)
}
return &dependencies.ClusterManagerTimeoutOptions{
ControlPlaneWait: cpWaitTimeout,
ExternalEtcdWait: externalEtcdWaitTimeout,
MachineWait: perMachineWaitTimeout,
UnhealthyMachineWait: unhealthyMachineTimeout,
NodeStartupWait: nodeStartupTimeout,
NoTimeouts: t.noTimeouts,
}, nil
}
type clusterOptions struct {
fileName string
bundlesOverride string
managementKubeconfig string
}
func (c clusterOptions) mountDirs() []string {
var dirs []string
if c.managementKubeconfig != "" {
dirs = append(dirs, filepath.Dir(c.managementKubeconfig))
}
return dirs
}
func readClusterSpec(clusterConfigPath string, cliVersion version.Info, opts ...cluster.FileSpecBuilderOpt) (*cluster.Spec, error) {
b := cluster.NewFileSpecBuilder(files.NewReader(), cliVersion, opts...)
return b.Build(clusterConfigPath)
}
func readAndValidateClusterSpec(clusterConfigPath string, cliVersion version.Info, opts ...cluster.FileSpecBuilderOpt) (*cluster.Spec, error) {
clusterSpec, err := readClusterSpec(clusterConfigPath, cliVersion, opts...)
if err != nil {
return nil, err
}
if err = cluster.ValidateConfig(clusterSpec.Config); err != nil {
return nil, err
}
return clusterSpec, nil
}
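// newClusterSpec builds a validated cluster.Spec from options.fileName,
// applying a Bundles manifest override when provided. For managed (workload)
// clusters it resolves the management cluster from options.managementKubeconfig,
// falling back to the environment via kubeconfig.FromEnvironment.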
func newClusterSpec(options clusterOptions) (*cluster.Spec, error) {
var opts []cluster.FileSpecBuilderOpt
if options.bundlesOverride != "" {
opts = append(opts, cluster.WithOverrideBundlesManifest(options.bundlesOverride))
}
clusterSpec, err := readAndValidateClusterSpec(options.fileName, version.Get(), opts...)
if err != nil {
return nil, fmt.Errorf("unable to get cluster config from file: %v", err)
}
if clusterSpec.Cluster.IsManaged() && options.managementKubeconfig == "" {
options.managementKubeconfig = kubeconfig.FromEnvironment()
}
if options.managementKubeconfig != "" {
managementCluster, err := cluster.LoadManagement(options.managementKubeconfig)
if err != nil {
return nil, fmt.Errorf("unable to get management cluster from kubeconfig: %v", err)
}
clusterSpec.ManagementCluster = managementCluster
}
return clusterSpec, nil
}
func markFlagHidden(flagSet *pflag.FlagSet, flagName string) {
if err := flagSet.MarkHidden(flagName); err != nil {
logger.V(5).Info("Warning: Failed to mark flag as hidden: " + flagName)
}
}
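// buildCliConfig reads the Git SSH key passphrase, private key file, and
// known-hosts file from their environment variables when the cluster spec
// configures Flux with a Git provider.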
func buildCliConfig(clusterSpec *cluster.Spec) *config.CliConfig {
cliConfig := &config.CliConfig{}
if clusterSpec.FluxConfig != nil && clusterSpec.FluxConfig.Spec.Git != nil {
cliConfig.GitSshKeyPassphrase = os.Getenv(config.EksaGitPassphraseTokenEnv)
cliConfig.GitPrivateKeyFile = os.Getenv(config.EksaGitPrivateKeyTokenEnv)
cliConfig.GitKnownHostsFile = os.Getenv(config.EksaGitKnownHostsFileEnv)
}
return cliConfig
}
func getManagementCluster(clusterSpec *cluster.Spec) *types.Cluster {
if clusterSpec.ManagementCluster == nil {
return &types.Cluster{
Name: clusterSpec.Cluster.Name,
KubeconfigFile: kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
}
} else {
return &types.Cluster{
Name: clusterSpec.ManagementCluster.Name,
KubeconfigFile: clusterSpec.ManagementCluster.KubeconfigFile,
}
}
}
func (c *clusterOptions) directoriesToMount(clusterSpec *cluster.Spec, cliConfig *config.CliConfig, addDirs ...string) ([]string, error) {
dirs := c.mountDirs()
fluxConfig := clusterSpec.FluxConfig
if fluxConfig != nil && fluxConfig.Spec.Git != nil {
dirs = append(dirs, filepath.Dir(cliConfig.GitPrivateKeyFile))
dirs = append(dirs, filepath.Dir(cliConfig.GitKnownHostsFile))
}
if clusterSpec.Config.Cluster.Spec.DatacenterRef.Kind == v1alpha1.CloudStackDatacenterKind {
if extraDirs, err := c.cloudStackDirectoriesToMount(); err == nil {
dirs = append(dirs, extraDirs...)
}
}
for _, addDir := range addDirs {
dirs = append(dirs, filepath.Dir(addDir))
}
return dirs, nil
}
func (c *clusterOptions) cloudStackDirectoriesToMount() ([]string, error) {
dirs := []string{}
env, found := os.LookupEnv(decoder.EksaCloudStackHostPathToMount)
if found && len(env) > 0 {
mountDirs := strings.Split(env, ",")
for _, dir := range mountDirs {
if _, err := os.Stat(dir); err != nil {
return nil, fmt.Errorf("invalid host path to mount: %v", err)
}
dirs = append(dirs, dir)
}
}
return dirs, nil
}
| 214 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/logger"
)
var rootCmd = &cobra.Command{
Use: "anywhere",
Short: "Amazon EKS Anywhere",
Long: `Use eksctl anywhere to build your own self-managing cluster on your hardware with the best of Amazon EKS`,
PersistentPreRun: rootPersistentPreRun,
PersistentPostRun: func(cmd *cobra.Command, args []string) {
outputFilePath := logger.GetOutputFilePath()
if outputFilePath == "" {
return
}
if err := os.Remove(outputFilePath); err != nil {
fmt.Printf("Failed to cleanup log file %s: %s", outputFilePath, err)
}
},
}
func init() {
rootCmd.PersistentFlags().IntP("verbosity", "v", 0, "Set the log level verbosity")
if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil {
log.Fatalf("failed to bind flags for root: %v", err)
}
}
func rootPersistentPreRun(cmd *cobra.Command, args []string) {
if err := initLogger(); err != nil {
log.Fatal(err)
}
}
func initLogger() error {
logsFolder := filepath.Join(".", "eksa-cli-logs")
err := os.MkdirAll(logsFolder, 0o750)
if err != nil {
return fmt.Errorf("failed to create logs folder: %v", err)
}
outputFilePath := filepath.Join(".", "eksa-cli-logs", fmt.Sprintf("%s.log", time.Now().Format("2006-01-02T15_04_05")))
if err = logger.Init(logger.Options{
Level: viper.GetInt("verbosity"),
OutputFilePath: outputFilePath,
}); err != nil {
return fmt.Errorf("root cmd: %v", err)
}
return nil
}
func Execute() error {
return rootCmd.ExecuteContext(context.Background())
}
// RootCmd returns the eksctl-anywhere root cmd.
func RootCmd() *cobra.Command {
return rootCmd
}
| 72 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/diagnostics"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/version"
)
type createSupportBundleOptions struct {
fileName string
wConfig string
since string
sinceTime string
bundleConfig string
hardwareFileName string
tinkerbellBootstrapIP string
}
var csbo = &createSupportBundleOptions{}
var supportbundleCmd = &cobra.Command{
Use: "support-bundle -f my-cluster.yaml",
Short: "Generate a support bundle",
Long: "This command is used to create a support bundle to troubleshoot a cluster",
PreRunE: preRunSupportBundle,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := csbo.validate(cmd.Context()); err != nil {
return err
}
if err := csbo.createBundle(cmd.Context(), csbo.since, csbo.sinceTime, csbo.bundleConfig); err != nil {
return fmt.Errorf("failed to create support bundle: %v", err)
}
return nil
},
}
func init() {
generateCmd.AddCommand(supportbundleCmd)
supportbundleCmd.Flags().StringVarP(&csbo.sinceTime, "since-time", "", "", "Collect pod logs after a specific datetime(RFC3339) like 2021-06-28T15:04:05Z")
supportbundleCmd.Flags().StringVarP(&csbo.since, "since", "", "", "Collect pod logs in the latest duration like 5s, 2m, or 3h.")
supportbundleCmd.Flags().StringVarP(&csbo.bundleConfig, "bundle-config", "", "", "Bundle Config file to use when generating support bundle")
supportbundleCmd.Flags().StringVarP(&csbo.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
supportbundleCmd.Flags().StringVarP(&csbo.wConfig, "w-config", "w", "", "Kubeconfig file to use when creating support bundle for a workload cluster")
err := supportbundleCmd.MarkFlagRequired("filename")
if err != nil {
log.Fatalf("Error marking flag as required: %v", err)
}
}
func (csbo *createSupportBundleOptions) validate(ctx context.Context) error {
clusterConfig, err := commonValidation(ctx, csbo.fileName)
if err != nil {
return err
}
kubeconfigPath := kubeconfig.FromClusterName(clusterConfig.Name)
if err := kubeconfig.ValidateFilename(kubeconfigPath); err != nil {
return err
}
return nil
}
func preRunSupportBundle(cmd *cobra.Command, args []string) error {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
return nil
}
func (csbo *createSupportBundleOptions) createBundle(ctx context.Context, since, sinceTime, bundleConfig string) error {
clusterSpec, err := readAndValidateClusterSpec(csbo.fileName, version.Get())
if err != nil {
return fmt.Errorf("unable to get cluster config from file: %v", err)
}
deps, err := dependencies.ForSpec(ctx, clusterSpec).
WithProvider(csbo.fileName, clusterSpec.Cluster, cc.skipIpCheck, csbo.hardwareFileName, false, csbo.tinkerbellBootstrapIP).
WithDiagnosticBundleFactory().
Build(ctx)
if err != nil {
return err
}
defer close(ctx, deps)
supportBundle, err := deps.DignosticCollectorFactory.DiagnosticBundle(clusterSpec, deps.Provider, getKubeconfigPath(clusterSpec.Cluster.Name, csbo.wConfig), bundleConfig)
if err != nil {
return fmt.Errorf("failed to parse collector: %v", err)
}
var sinceTimeValue *time.Time
sinceTimeValue, err = diagnostics.ParseTimeOptions(since, sinceTime)
if err != nil {
return fmt.Errorf("failed parse since time: %v", err)
}
err = supportBundle.CollectAndAnalyze(ctx, sinceTimeValue)
if err != nil {
return fmt.Errorf("collecting and analyzing bundle: %v", err)
}
err = supportBundle.PrintAnalysis()
if err != nil {
return fmt.Errorf("printing analysis")
}
return nil
}
| 123 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var upgradeCmd = &cobra.Command{
Use: "upgrade",
Short: "Upgrade resources",
Long: "Use eksctl anywhere upgrade to upgrade resources, such as clusters",
}
func init() {
rootCmd.AddCommand(upgradeCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
"github.com/aws/eks-anywhere/pkg/workflows"
)
type upgradeClusterOptions struct {
clusterOptions
timeoutOptions
wConfig string
forceClean bool
hardwareCSVPath string
tinkerbellBootstrapIP string
skipValidations []string
}
var uc = &upgradeClusterOptions{}
var upgradeClusterCmd = &cobra.Command{
Use: "cluster",
Short: "Upgrade workload cluster",
Long: "This command is used to upgrade workload clusters",
PreRunE: bindFlagsToViper,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := uc.upgradeCluster(cmd); err != nil {
return fmt.Errorf("failed to upgrade cluster: %v", err)
}
return nil
},
}
func init() {
upgradeCmd.AddCommand(upgradeClusterCmd)
applyClusterOptionFlags(upgradeClusterCmd.Flags(), &uc.clusterOptions)
applyTimeoutFlags(upgradeClusterCmd.Flags(), &uc.timeoutOptions)
applyTinkerbellHardwareFlag(upgradeClusterCmd.Flags(), &uc.hardwareCSVPath)
upgradeClusterCmd.Flags().StringVarP(&uc.wConfig, "w-config", "w", "", "Kubeconfig file to use when upgrading a workload cluster")
upgradeClusterCmd.Flags().BoolVar(&uc.forceClean, "force-cleanup", false, "Force deletion of previously created bootstrap cluster")
upgradeClusterCmd.Flags().StringArrayVar(&uc.skipValidations, "skip-validations", []string{}, "Bypass upgrade validations by name. Valid arguments you can pass are --skip-validations=pod-disruption")
if err := upgradeClusterCmd.MarkFlagRequired("filename"); err != nil {
log.Fatalf("Error marking flag as required: %v", err)
}
}
func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command) error {
ctx := cmd.Context()
clusterConfigFileExist := validations.FileExists(uc.fileName)
if !clusterConfigFileExist {
return fmt.Errorf("the cluster config file %s does not exist", uc.fileName)
}
clusterConfig, err := v1alpha1.GetAndValidateClusterConfig(uc.fileName)
if err != nil {
return fmt.Errorf("the cluster config file provided is invalid: %v", err)
}
if clusterConfig.Spec.DatacenterRef.Kind == v1alpha1.TinkerbellDatacenterKind {
if err := checkTinkerbellFlags(cmd.Flags(), uc.hardwareCSVPath, Upgrade); err != nil {
return err
}
}
if _, err := uc.commonValidations(ctx); err != nil {
return fmt.Errorf("common validations failed due to: %v", err)
}
clusterSpec, err := newClusterSpec(uc.clusterOptions)
if err != nil {
return err
}
if err := validations.ValidateAuthenticationForRegistryMirror(clusterSpec); err != nil {
return err
}
cliConfig := buildCliConfig(clusterSpec)
dirs, err := uc.directoriesToMount(clusterSpec, cliConfig)
if err != nil {
return err
}
clusterManagerTimeoutOpts, err := buildClusterManagerOpts(uc.timeoutOptions, clusterSpec.Cluster.Spec.DatacenterRef.Kind)
if err != nil {
return fmt.Errorf("failed to build cluster manager opts: %v", err)
}
factory := dependencies.ForSpec(ctx, clusterSpec).WithExecutableMountDirs(dirs...).
WithBootstrapper().
WithCliConfig(cliConfig).
WithClusterManager(clusterSpec.Cluster, clusterManagerTimeoutOpts).
WithKubeProxyCLIUpgrader().
WithProvider(uc.fileName, clusterSpec.Cluster, cc.skipIpCheck, uc.hardwareCSVPath, uc.forceClean, uc.tinkerbellBootstrapIP).
WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig).
WithWriter().
WithCAPIManager().
WithEksdUpgrader().
WithEksdInstaller().
WithKubectl().
WithValidatorClients()
if uc.timeoutOptions.noTimeouts {
factory.WithNoTimeouts()
}
deps, err := factory.Build(ctx)
if err != nil {
return err
}
defer close(ctx, deps)
upgradeCluster := workflows.NewUpgrade(
deps.Bootstrapper,
deps.Provider,
deps.CAPIManager,
deps.ClusterManager,
deps.GitOpsFlux,
deps.Writer,
deps.EksdUpgrader,
deps.EksdInstaller,
deps.KubeProxyCLIUpgrader,
)
workloadCluster := &types.Cluster{
Name: clusterSpec.Cluster.Name,
KubeconfigFile: getKubeconfigPath(clusterSpec.Cluster.Name, uc.wConfig),
}
var managementCluster *types.Cluster
if clusterSpec.ManagementCluster == nil {
managementCluster = workloadCluster
} else {
managementCluster = clusterSpec.ManagementCluster
}
validationOpts := &validations.Opts{
Kubectl: deps.UnAuthKubectlClient,
Spec: clusterSpec,
WorkloadCluster: workloadCluster,
ManagementCluster: managementCluster,
Provider: deps.Provider,
CliConfig: cliConfig,
}
if len(uc.skipValidations) != 0 {
validationOpts.SkippedValidations, err = upgradevalidations.ValidateSkippableUpgradeValidation(uc.skipValidations)
if err != nil {
return err
}
}
upgradeValidations := upgradevalidations.New(validationOpts)
err = upgradeCluster.Run(ctx, clusterSpec, managementCluster, workloadCluster, upgradeValidations, uc.forceClean)
cleanup(deps, &err)
return err
}
func (uc *upgradeClusterOptions) commonValidations(ctx context.Context) (cluster *v1alpha1.Cluster, err error) {
clusterConfig, err := commonValidation(ctx, uc.fileName)
if err != nil {
return nil, err
}
kubeconfigPath := getKubeconfigPath(clusterConfig.Name, uc.wConfig)
if err := kubeconfig.ValidateFilename(kubeconfigPath); err != nil {
return nil, err
}
return clusterConfig, nil
}
| 184 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
type upgradePackageOptions struct {
bundleVersion string
// kubeConfig is an optional kubeconfig file to use when querying an
// existing cluster.
kubeConfig string
clusterName string
bundlesOverride string
}
var upo = &upgradePackageOptions{}
func init() {
upgradeCmd.AddCommand(upgradePackagesCommand)
upgradePackagesCommand.Flags().StringVar(&upo.bundleVersion, "bundle-version",
"", "Bundle version to use")
upgradePackagesCommand.Flags().StringVar(&upo.kubeConfig, "kubeconfig",
"", "Path to an optional kubeconfig file to use.")
upgradePackagesCommand.Flags().StringVar(&upo.clusterName, "cluster",
"", "Cluster to upgrade.")
upgradePackagesCommand.Flags().StringVar(&upo.bundlesOverride, "bundles-override", "",
"Override default Bundles manifest (not recommended)")
err := upgradePackagesCommand.MarkFlagRequired("bundle-version")
if err != nil {
log.Fatalf("Error marking flag as required: %v", err)
}
err = upgradePackagesCommand.MarkFlagRequired("cluster")
if err != nil {
log.Fatalf("Error marking flag as required: %v", err)
}
}
var upgradePackagesCommand = &cobra.Command{
Use: "packages",
Short: "Upgrade all curated packages to the latest version",
PreRunE: preRunPackages,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := upgradePackages(cmd.Context()); err != nil {
return err
}
return nil
},
}
func upgradePackages(ctx context.Context) error {
kubeConfig, err := kubeconfig.ResolveAndValidateFilename(upo.kubeConfig, "")
if err != nil {
return err
}
deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithBundlesOverride(upo.bundlesOverride))
if err != nil {
return fmt.Errorf("unable to initialize executables: %v", err)
}
b := curatedpackages.NewBundleReader(kubeConfig, upo.clusterName, deps.Kubectl, nil, nil)
activeController, err := b.GetActiveController(ctx)
if err != nil {
return err
}
return b.UpgradeBundle(ctx, activeController, upo.bundleVersion)
}
| 78 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var upgradePlanCmd = &cobra.Command{
Use: "plan",
Short: "Provides information for a resource upgrade",
Long: "Use eksctl anywhere upgrade plan to get information for a resource upgrade",
}
func init() {
upgradeCmd.AddCommand(upgradePlanCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"text/tabwriter"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
capiupgrader "github.com/aws/eks-anywhere/pkg/clusterapi"
eksaupgrader "github.com/aws/eks-anywhere/pkg/clustermanager"
"github.com/aws/eks-anywhere/pkg/dependencies"
fluxupgrader "github.com/aws/eks-anywhere/pkg/gitops/flux"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
outputFlagName = "output"
outputDefault = outputText
outputText = "text"
outputJson = "json"
)
var output string
var upgradePlanClusterCmd = &cobra.Command{
Use: "cluster",
Short: "Provides new release versions for the next cluster upgrade",
Long: "Provides a list of target versions for upgrading the core components in the workload cluster",
PreRunE: preRunUpgradePlanCluster,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := uc.upgradePlanCluster(cmd.Context()); err != nil {
return fmt.Errorf("failed to display upgrade plan: %v", err)
}
return nil
},
}
func preRunUpgradePlanCluster(cmd *cobra.Command, args []string) error {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
return nil
}
func init() {
upgradePlanCmd.AddCommand(upgradePlanClusterCmd)
upgradePlanClusterCmd.Flags().StringVarP(&uc.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
upgradePlanClusterCmd.Flags().StringVar(&uc.bundlesOverride, "bundles-override", "", "Override default Bundles manifest (not recommended)")
upgradePlanClusterCmd.Flags().StringVarP(&output, outputFlagName, "o", outputDefault, "Output format: text|json")
upgradePlanClusterCmd.Flags().StringVar(&uc.managementKubeconfig, "kubeconfig", "", "Management cluster kubeconfig file")
err := upgradePlanClusterCmd.MarkFlagRequired("filename")
if err != nil {
log.Fatalf("Error marking flag as required: %v", err)
}
}
func (uc *upgradeClusterOptions) upgradePlanCluster(ctx context.Context) error {
if _, err := uc.commonValidations(ctx); err != nil {
return fmt.Errorf("common validations failed due to: %v", err)
}
newClusterSpec, err := newClusterSpec(uc.clusterOptions)
if err != nil {
return err
}
deps, err := dependencies.ForSpec(ctx, newClusterSpec).
WithClusterManager(newClusterSpec.Cluster, nil).
WithProvider(uc.fileName, newClusterSpec.Cluster, false, uc.hardwareCSVPath, uc.forceClean, uc.tinkerbellBootstrapIP).
WithGitOpsFlux(newClusterSpec.Cluster, newClusterSpec.FluxConfig, nil).
WithCAPIManager().
Build(ctx)
if err != nil {
return err
}
managementCluster := &types.Cluster{
Name: newClusterSpec.Cluster.Name,
KubeconfigFile: getKubeconfigPath(newClusterSpec.Cluster.Name, uc.wConfig),
}
if newClusterSpec.ManagementCluster != nil {
managementCluster = newClusterSpec.ManagementCluster
}
logger.V(0).Info("Checking new release availability...")
currentSpec, err := deps.ClusterManager.GetCurrentClusterSpec(ctx, managementCluster, newClusterSpec.Cluster.Name)
if err != nil {
return err
}
componentChangeDiffs := eksaupgrader.EksaChangeDiff(currentSpec, newClusterSpec)
if componentChangeDiffs == nil {
componentChangeDiffs = &types.ChangeDiff{}
}
componentChangeDiffs.Append(fluxupgrader.FluxChangeDiff(currentSpec, newClusterSpec))
componentChangeDiffs.Append(capiupgrader.CapiChangeDiff(currentSpec, newClusterSpec, deps.Provider))
componentChangeDiffs.Append(cilium.ChangeDiff(currentSpec, newClusterSpec))
serializedDiff, err := serialize(componentChangeDiffs, output)
if err != nil {
return err
}
fmt.Print(serializedDiff)
return nil
}
func serialize(componentChangeDiffs *types.ChangeDiff, outputFormat string) (string, error) {
switch outputFormat {
case outputText:
return serializeToText(componentChangeDiffs)
case outputJson:
return serializeToJson(componentChangeDiffs)
default:
return "", fmt.Errorf("invalid output format [%s]", outputFormat)
}
}
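// serializeToText renders the diff as a tab-aligned table via text/tabwriter,
// producing output like (illustrative):
//
//	NAME     CURRENT VERSION   NEXT VERSION
//	cilium   v1.11.1           v1.11.2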
func serializeToText(componentChangeDiffs *types.ChangeDiff) (string, error) {
if componentChangeDiffs == nil {
return "All the components are up to date with the latest versions", nil
}
buffer := bytes.Buffer{}
w := tabwriter.NewWriter(&buffer, 10, 4, 3, ' ', 0)
fmt.Fprintln(w, "NAME\tCURRENT VERSION\tNEXT VERSION")
for i := range componentChangeDiffs.ComponentReports {
fmt.Fprintf(w, "%s\t%s\t%s\n", componentChangeDiffs.ComponentReports[i].ComponentName, componentChangeDiffs.ComponentReports[i].OldVersion, componentChangeDiffs.ComponentReports[i].NewVersion)
}
if err := w.Flush(); err != nil {
return "", fmt.Errorf("failed flushing table writer: %v", err)
}
return buffer.String(), nil
}
func serializeToJson(componentChangeDiffs *types.ChangeDiff) (string, error) {
if componentChangeDiffs == nil {
componentChangeDiffs = &types.ChangeDiff{ComponentReports: []types.ComponentChangeDiff{}}
}
jsonDiff, err := json.Marshal(componentChangeDiffs)
if err != nil {
return "", fmt.Errorf("failed serializing the components diff to json: %v", err)
}
return string(jsonDiff), nil
}
| 163 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var validateCmd = &cobra.Command{
Use: "validate",
Short: "Validate resource or action",
Long: "Use eksctl anywhere validate to validate a resource or action",
}
func init() {
expCmd.AddCommand(validateCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var validateCreateCmd = &cobra.Command{
Use: "create",
Short: "Validate create resources",
Long: "Use eksctl anywhere validate create to validate the create action on resources, such as cluster",
}
func init() {
validateCmd.AddCommand(validateCreateCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"log"
"os"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/createcluster"
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
"github.com/aws/eks-anywhere/pkg/version"
)
type validateOptions struct {
clusterOptions
hardwareCSVPath string
tinkerbellBootstrapIP string
}
var valOpt = &validateOptions{}
var validateCreateClusterCmd = &cobra.Command{
Use: "cluster -f <cluster-config-file> [flags]",
Short: "Validate create cluster",
Long: "Use eksctl anywhere validate create cluster to validate the create cluster action",
PreRunE: bindFlagsToViper,
SilenceUsage: true,
RunE: valOpt.validateCreateCluster,
}
func init() {
validateCreateCmd.AddCommand(validateCreateClusterCmd)
applyTinkerbellHardwareFlag(validateCreateClusterCmd.Flags(), &valOpt.hardwareCSVPath)
validateCreateClusterCmd.Flags().StringVarP(&valOpt.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
validateCreateClusterCmd.Flags().StringVar(&valOpt.tinkerbellBootstrapIP, "tinkerbell-bootstrap-ip", "", "Override the local tinkerbell IP in the bootstrap cluster")
if err := validateCreateClusterCmd.MarkFlagRequired("filename"); err != nil {
log.Fatalf("Error marking flag as required: %v", err)
}
}
func (valOpt *validateOptions) validateCreateCluster(cmd *cobra.Command, _ []string) error {
ctx := cmd.Context()
clusterSpec, err := readClusterSpec(valOpt.fileName, version.Get())
if err != nil {
return err
}
if clusterSpec.Config.Cluster.Spec.DatacenterRef.Kind == v1alpha1.TinkerbellDatacenterKind {
if err := checkTinkerbellFlags(cmd.Flags(), valOpt.hardwareCSVPath, 0); err != nil {
return err
}
}
cliConfig := buildCliConfig(clusterSpec)
dirs, err := valOpt.directoriesToMount(clusterSpec, cliConfig)
if err != nil {
return err
}
tmpPath, err := os.MkdirTemp("./", "tmpValidate")
if err != nil {
return err
}
deps, err := dependencies.ForSpec(ctx, clusterSpec).
WithExecutableMountDirs(dirs...).
WithWriterFolder(tmpPath).
WithDocker().
WithKubectl().
WithProvider(valOpt.fileName, clusterSpec.Cluster, false, valOpt.hardwareCSVPath, true, valOpt.tinkerbellBootstrapIP).
WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig).
WithUnAuthKubeClient().
WithValidatorClients().
Build(ctx)
if err != nil {
cleanupDirectory(tmpPath)
return err
}
defer close(ctx, deps)
validationOpts := &validations.Opts{
Kubectl: deps.UnAuthKubectlClient,
Spec: clusterSpec,
WorkloadCluster: &types.Cluster{
Name: clusterSpec.Cluster.Name,
KubeconfigFile: kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
},
ManagementCluster: getManagementCluster(clusterSpec),
Provider: deps.Provider,
CliConfig: cliConfig,
}
createValidations := createvalidations.New(validationOpts)
commandVal := createcluster.NewValidations(clusterSpec, deps.Provider, deps.GitOpsFlux, createValidations, deps.DockerClient)
err = commandVal.Validate(ctx)
cleanupDirectory(tmpPath)
return err
}
| 107 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/validations"
)
func commonValidation(ctx context.Context, clusterConfigFile string) (*v1alpha1.Cluster, error) {
docker := executables.BuildDockerExecutable()
err := validations.CheckMinimumDockerVersion(ctx, docker)
if err != nil {
return nil, fmt.Errorf("failed to validate docker: %v", err)
}
validations.CheckDockerAllocatedMemory(ctx, docker)
clusterConfigFileExist := validations.FileExists(clusterConfigFile)
if !clusterConfigFileExist {
return nil, fmt.Errorf("the cluster config file %s does not exist", clusterConfigFile)
}
clusterConfig, err := v1alpha1.GetAndValidateClusterConfig(clusterConfigFile)
if err != nil {
return nil, fmt.Errorf("the cluster config file provided is invalid: %v", err)
}
return clusterConfig, nil
}
| 29 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/version"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Get the eksctl anywhere version",
Long: "This command prints the version of eksctl anywhere",
RunE: func(cmd *cobra.Command, args []string) error {
return printVersion()
},
}
func init() {
rootCmd.AddCommand(versionCmd)
}
func printVersion() error {
fmt.Println(version.Get().GitVersion)
return nil
}
| 28 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var vsphereCmd = &cobra.Command{
Use: "vsphere",
Short: "Utility vsphere operations",
Long: "Use eksctl anywhere vsphere to perform utility operations on vsphere",
}
func init() {
expCmd.AddCommand(vsphereCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var vsphereSetupCmd = &cobra.Command{
Use: "setup",
Short: "Setup vSphere objects",
Long: "Use eksctl anywhere vsphere setup to configure vSphere objects",
}
func init() {
vsphereCmd.AddCommand(vsphereSetupCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser"
)
type vSphereSetupUserOptions struct {
fileName string
force bool
password string
}
var setupUserOptions = &vSphereSetupUserOptions{}
var setupUserCmd = &cobra.Command{
Use: "user -f <config-file> [flags]",
Short: "Setup vSphere user",
Long: "Use eksctl anywhere vsphere setup user to configure EKS Anywhere vSphere user",
PreRunE: bindFlagsToViper,
SilenceUsage: false,
RunE: setupUserOptions.setupUser,
}
func init() {
vsphereSetupCmd.AddCommand(setupUserCmd)
setupUserCmd.Flags().StringVarP(&setupUserOptions.fileName, "filename", "f", "", "Filename containing vsphere setup configuration")
setupUserCmd.Flags().StringVarP(&setupUserOptions.password, "password", "p", "", "Password for creating new user")
setupUserCmd.Flags().BoolVarP(&setupUserOptions.force, "force", "", false, "Force flag. When set, setup user will proceed even if the group and role objects already exist. Mutually exclusive with --password flag, as it expects the user to already exist. default: false")
if err := setupUserCmd.MarkFlagRequired("filename"); err != nil {
log.Fatalf("error marking flag as required: %v", err)
}
}
func (setupUserOptions *vSphereSetupUserOptions) setupUser(cmd *cobra.Command, _ []string) error {
ctx := cmd.Context()
if setupUserOptions.force && setupUserOptions.password != "" {
return fmt.Errorf("--password and --force are mutually exclusive. --force may only be run on an existing user")
}
cfg, err := setupuser.GenerateConfig(ctx, setupUserOptions.fileName)
if err != nil {
return err
}
err = setupuser.SetupGOVCEnv(ctx, cfg)
if err != nil {
return err
}
deps, err := dependencies.NewFactory().WithGovc().Build(ctx)
if err != nil {
return err
}
defer close(ctx, deps)
// when using the force flag we assume the user already exists
if !setupUserOptions.force {
err = deps.Govc.CreateUser(ctx, cfg.Spec.Username, setupUserOptions.password)
if err != nil {
return err
}
err = setupuser.ValidateVSphereObjects(ctx, cfg, deps.Govc)
if err != nil {
return err
}
}
err = setupuser.Run(ctx, cfg, deps.Govc)
if err != nil {
return err
}
return nil
}
| 83 |
eks-anywhere | aws | Go | package artifacts
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"github.com/pkg/errors"
)
const (
realmKey = "realm="
serviceKey = "service="
scopeKey = "scope="
)
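// CheckImageExistence verifies that ImageUri exists in its registry by
// issuing a GET against the v2 manifests endpoint
// (https://<registry>/v2/<repository>/manifests/<tag>), acquiring an
// anonymous bearer token and retrying once if the registry answers 401.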
type CheckImageExistence struct {
ImageUri string
AuthHeader string
}
type tokenResponse struct {
Token string `json:"token"`
}
func (d CheckImageExistence) Run(ctx context.Context) error {
registry, repository, tag, err := splitImageUri(d.ImageUri)
if err != nil {
return err
}
requestUrl := fmt.Sprintf("https://%s/v2/%s/manifests/%s", registry, repository, tag)
req, err := http.NewRequest("GET", requestUrl, nil)
if err != nil {
return errors.Cause(err)
}
req.Header.Add("Authorization", d.AuthHeader)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return fmt.Errorf("requested image not found")
} else if resp.StatusCode == http.StatusUnauthorized && len(d.AuthHeader) == 0 {
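// Parse the Www-Authenticate challenge to discover the token endpoint.
// A typical header looks like (illustrative):
//   Bearer realm="https://public.ecr.aws/token",service="public.ecr.aws",scope="aws"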
splits := strings.Split(resp.Header.Get("www-authenticate"), ",")
var realm, service, scope string
for _, split := range splits {
if strings.Contains(split, realmKey) {
startIndex := strings.Index(split, realmKey) + len(realmKey)
realm = strings.Trim(split[startIndex:], "\"")
} else if strings.Contains(split, serviceKey) {
startIndex := strings.Index(split, serviceKey) + len(serviceKey)
service = strings.Trim(split[startIndex:], "\"")
} else if strings.Contains(split, scopeKey) {
startIndex := strings.Index(split, scopeKey) + len(scopeKey)
scope = strings.Trim(split[startIndex:], "\"")
}
}
token, err := getRegistryToken(realm, service, scope)
if err != nil {
return err
}
d.AuthHeader = "Bearer " + token
return d.Run(ctx)
} else if resp.StatusCode != http.StatusOK {
return fmt.Errorf("unknown response: %s", resp.Status)
}
return nil
}
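// getRegistryToken fetches an anonymous bearer token from the token endpoint
// advertised in the registry's challenge (the realm), following the Docker
// registry token authentication flow.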
func getRegistryToken(realm, service, scope string) (string, error) {
requestUrl := fmt.Sprintf("%s?service=\"%s\"&scope=\"%s\"", realm, service, scope)
req, err := http.NewRequest("GET", requestUrl, nil)
if err != nil {
return "", errors.Cause(err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("failed to token from %s", requestUrl)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
tokenResp := tokenResponse{}
if err := json.Unmarshal(body, &tokenResp); err != nil {
return "", err
}
return tokenResp.Token, nil
}
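// splitImageUri splits an image URI into registry, repository, and tag.
// For example, "public.ecr.aws:443/bottlerocket/bottlerocket-admin:v0.8.0"
// yields ("public.ecr.aws:443", "bottlerocket/bottlerocket-admin", "v0.8.0").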
func splitImageUri(imageUri string) (string, string, string, error) {
indexOfSlash := strings.Index(imageUri, "/")
if indexOfSlash < 0 {
return "", "", "", errors.Errorf("Invalid URI: %s", imageUri)
}
registry := imageUri[:indexOfSlash]
imageUriSplit := strings.Split(imageUri[len(registry)+1:], ":")
if len(imageUriSplit) < 2 {
return "", "", "", errors.Errorf("Invalid URI: %s", imageUri)
}
repository := strings.Replace(imageUriSplit[0], registry+"/", "", -1)
tag := imageUriSplit[1]
return registry, repository, tag, nil
}
| 119 |
eks-anywhere | aws | Go | package artifacts_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
)
type checkImageExistenceTest struct {
*WithT
ctx context.Context
command *artifacts.CheckImageExistence
}
func newCheckImageExistenceTest(t *testing.T) *checkImageExistenceTest {
return &checkImageExistenceTest{
WithT: NewWithT(t),
ctx: context.Background(),
command: &artifacts.CheckImageExistence{},
}
}
func TestCheckImageExistenceRun(t *testing.T) {
tt := newCheckImageExistenceTest(t)
// Default image URI
tt.command.ImageUri = "public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0"
tt.Expect(tt.command.Run(tt.ctx)).To(Succeed())
// Mirrored image URI
tt.command.ImageUri = "public.ecr.aws:443/bottlerocket/bottlerocket-admin:v0.8.0"
tt.Expect(tt.command.Run(tt.ctx)).To(Succeed())
// Nonexisting mirrored image URI
tt.command.ImageUri = "public.ecr.aws:443/xxx"
tt.Expect(tt.command.Run(tt.ctx)).NotTo(Succeed())
// Invalid URI #1
tt.command.ImageUri = ""
tt.Expect(tt.command.Run(tt.ctx)).NotTo(Succeed())
// Invalid URI #2
tt.command.ImageUri = "public.ecr.aws/"
tt.Expect(tt.command.Run(tt.ctx)).NotTo(Succeed())
// Invalid URI #3
tt.command.ImageUri = "public.ecr.aws:443/"
tt.Expect(tt.command.Run(tt.ctx)).NotTo(Succeed())
}
| 53 |
eks-anywhere | aws | Go | package artifacts
import (
"context"
"fmt"
"os"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/version"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type Reader interface {
ReadBundlesForVersion(eksaVersion string) (*releasev1.Bundles, error)
ReadImagesFromBundles(ctx context.Context, bundles *releasev1.Bundles) ([]releasev1.Image, error)
ReadChartsFromBundles(ctx context.Context, bundles *releasev1.Bundles) []releasev1.Image
}
type ImageMover interface {
Move(ctx context.Context, artifacts ...string) error
}
type ChartDownloader interface {
Download(ctx context.Context, artifacts ...string) error
}
type ManifestDownloader interface {
Download(ctx context.Context, bundles *releasev1.Bundles)
}
type Packager interface {
Package(folder string, dstFile string) error
}
type Download struct {
Reader Reader
FileReader *files.Reader
Version version.Info
BundlesImagesDownloader ImageMover
EksaToolsImageDownloader ImageMover
ChartDownloader ChartDownloader
Packager Packager
TmpDowloadFolder string
DstFile string
ManifestDownloader ManifestDownloader
BundlesOverride string
}
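// Run resolves the Bundles manifest (BundlesOverride if set, otherwise the
// release matching the CLI version), then downloads the EKS-A tools image,
// the remaining bundle images, the charts, and the manifests into
// TmpDowloadFolder, packages the folder into DstFile, and deletes the
// temporary folder.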
func (d Download) Run(ctx context.Context) error {
if err := os.MkdirAll(d.TmpDowloadFolder, os.ModePerm); err != nil {
return fmt.Errorf("creating tmp artifact download folder: %v", err)
}
var b *releasev1.Bundles
var err error
if d.BundlesOverride != "" {
b, err = bundles.Read(d.FileReader, d.BundlesOverride)
if err != nil {
return fmt.Errorf("reading bundles override: %v", err)
}
} else {
b, err = d.Reader.ReadBundlesForVersion(d.Version.GitVersion)
if err != nil {
return fmt.Errorf("reading bundles for version %s: %v", d.Version.GitVersion, err)
}
}
toolsImage := b.DefaultEksAToolsImage().VersionedImage()
if err = d.EksaToolsImageDownloader.Move(ctx, toolsImage); err != nil {
return fmt.Errorf("downloading eksa tools image: %v", err)
}
images, err := d.Reader.ReadImagesFromBundles(ctx, b)
if err != nil {
return fmt.Errorf("downloading images: %v", err)
}
if err = d.BundlesImagesDownloader.Move(ctx, removeFromSlice(artifactNames(images), toolsImage)...); err != nil {
return err
}
charts := d.Reader.ReadChartsFromBundles(ctx, b)
d.ManifestDownloader.Download(ctx, b)
if err := d.ChartDownloader.Download(ctx, artifactNames(charts)...); err != nil {
return err
}
logger.Info("Packaging artifacts", "dst", d.DstFile)
if err := d.Packager.Package(d.TmpDowloadFolder, d.DstFile); err != nil {
return err
}
if err := os.RemoveAll(d.TmpDowloadFolder); err != nil {
return fmt.Errorf("deleting tmp artifact download folder: %v", err)
}
return nil
}
func artifactNames(artifacts []releasev1.Image) []string {
taggedArtifacts := make([]string, 0, len(artifacts))
for _, a := range artifacts {
taggedArtifacts = append(taggedArtifacts, a.VersionedImage())
}
return taggedArtifacts
}
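// removeFromSlice filters toRemove out of s in place, preserving order and
// reusing the backing array; the input slice is mutated.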
func removeFromSlice(s []string, toRemove string) []string {
index := 0
for _, i := range s {
if i != toRemove {
s[index] = i
index++
}
}
return s[:index]
}
| 123 |
eks-anywhere | aws | Go | package artifacts_test
import (
"context"
"errors"
"os"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts/mocks"
"github.com/aws/eks-anywhere/pkg/version"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type downloadArtifactsTest struct {
*WithT
ctx context.Context
reader *mocks.MockReader
mover *mocks.MockImageMover
downloader *mocks.MockChartDownloader
toolsDownloader *mocks.MockImageMover
packager *mocks.MockPackager
command *artifacts.Download
images, charts []releasev1.Image
bundles *releasev1.Bundles
manifestDownloader *mocks.MockManifestDownloader
}
func newDownloadArtifactsTest(t *testing.T) *downloadArtifactsTest {
downloadFolder := "tmp-folder"
t.Cleanup(func() {
os.RemoveAll(downloadFolder)
})
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
mover := mocks.NewMockImageMover(ctrl)
toolsDownloader := mocks.NewMockImageMover(ctrl)
downloader := mocks.NewMockChartDownloader(ctrl)
packager := mocks.NewMockPackager(ctrl)
manifestDownloader := mocks.NewMockManifestDownloader(ctrl)
images := []releasev1.Image{
{
Name: "image 1",
URI: "image1:1",
},
{
Name: "image 2",
URI: "image2:1",
},
{
Name: "tools",
URI: "tools:v1.0.0",
},
}
charts := []releasev1.Image{
{
Name: "chart 1",
URI: "chart:v1.0.0",
},
{
Name: "chart 2",
URI: "package-chart:v1.0.0",
},
}
return &downloadArtifactsTest{
WithT: NewWithT(t),
ctx: context.Background(),
reader: reader,
mover: mover,
toolsDownloader: toolsDownloader,
downloader: downloader,
packager: packager,
images: images,
charts: charts,
command: &artifacts.Download{
Reader: reader,
BundlesImagesDownloader: mover,
EksaToolsImageDownloader: toolsDownloader,
ChartDownloader: downloader,
Packager: packager,
Version: version.Info{GitVersion: "v1.0.0"},
TmpDowloadFolder: downloadFolder,
DstFile: "artifacts.tar",
ManifestDownloader: manifestDownloader,
},
bundles: &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
Eksa: releasev1.EksaBundle{
CliTools: releasev1.Image{
URI: "tools:v1.0.0",
},
},
},
},
},
},
manifestDownloader: manifestDownloader,
}
}
func TestDownloadRun(t *testing.T) {
tt := newDownloadArtifactsTest(t)
tt.reader.EXPECT().ReadBundlesForVersion("v1.0.0").Return(tt.bundles, nil)
tt.toolsDownloader.EXPECT().Move(tt.ctx, "tools:v1.0.0")
tt.reader.EXPECT().ReadImagesFromBundles(tt.ctx, tt.bundles).Return(tt.images, nil)
tt.mover.EXPECT().Move(tt.ctx, "image1:1", "image2:1")
tt.reader.EXPECT().ReadChartsFromBundles(tt.ctx, tt.bundles).Return(tt.charts)
tt.downloader.EXPECT().Download(tt.ctx, "chart:v1.0.0", "package-chart:v1.0.0")
tt.packager.EXPECT().Package("tmp-folder", "artifacts.tar")
tt.manifestDownloader.EXPECT().Download(tt.ctx, tt.bundles)
tt.Expect(tt.command.Run(tt.ctx)).To(Succeed())
}
func TestDownloadErrorReadingImages(t *testing.T) {
tt := newDownloadArtifactsTest(t)
tt.reader.EXPECT().ReadBundlesForVersion("v1.0.0").Return(tt.bundles, nil)
tt.toolsDownloader.EXPECT().Move(tt.ctx, "tools:v1.0.0")
tt.reader.EXPECT().ReadImagesFromBundles(tt.ctx, tt.bundles).Return(nil, errors.New("error reading images"))
tt.Expect(tt.command.Run(tt.ctx)).To(MatchError(ContainSubstring("downloading images: error reading images")))
}
| 130 |
eks-anywhere | aws | Go | package artifacts
import (
"context"
"fmt"
"os"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type Import struct {
Reader Reader
Bundles *releasev1.Bundles
ImageMover ImageMover
ChartImporter ChartImporter
TmpArtifactsFolder string
FileImporter FileImporter
}
type ChartImporter interface {
Import(ctx context.Context, charts ...string) error
}
type FileImporter interface {
Push(ctx context.Context, bundles *releasev1.Bundles)
}
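// Run moves the bundle images, imports the charts, and pushes the bundle
// files using the configured movers and importers, then deletes
// TmpArtifactsFolder.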
func (i Import) Run(ctx context.Context) error {
images, err := i.Reader.ReadImagesFromBundles(ctx, i.Bundles)
if err != nil {
return fmt.Errorf("downloading images: %v", err)
}
if err = i.ImageMover.Move(ctx, artifactNames(images)...); err != nil {
return err
}
charts := i.Reader.ReadChartsFromBundles(ctx, i.Bundles)
if err := i.ChartImporter.Import(ctx, artifactNames(charts)...); err != nil {
return err
}
i.FileImporter.Push(ctx, i.Bundles)
if err := os.RemoveAll(i.TmpArtifactsFolder); err != nil {
return fmt.Errorf("deleting tmp artifact import folder: %v", err)
}
return nil
}
| 52 |
eks-anywhere | aws | Go | package artifacts_test
import (
"context"
"os"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts/mocks"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type importArtifactsTest struct {
*WithT
ctx context.Context
reader *mocks.MockReader
mover *mocks.MockImageMover
importer *mocks.MockChartImporter
command *artifacts.Import
images, charts []releasev1.Image
bundles *releasev1.Bundles
fileImporter *mocks.MockFileImporter
}
func newImportArtifactsTest(t *testing.T) *importArtifactsTest {
downloadFolder := "tmp-folder"
t.Cleanup(func() {
os.RemoveAll(downloadFolder)
})
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
mover := mocks.NewMockImageMover(ctrl)
importer := mocks.NewMockChartImporter(ctrl)
fileImporter := mocks.NewMockFileImporter(ctrl)
images := []releasev1.Image{
{
Name: "image 1",
URI: "image1:1",
},
{
Name: "image 2",
URI: "image2:1",
},
}
charts := []releasev1.Image{
{
Name: "chart 1",
URI: "chart:v1.0.0",
},
{
Name: "chart 2",
URI: "package-chart:v1.0.0",
},
}
bundles := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{},
},
},
}
return &importArtifactsTest{
WithT: NewWithT(t),
ctx: context.Background(),
reader: reader,
mover: mover,
images: images,
charts: charts,
importer: importer,
command: &artifacts.Import{
Reader: reader,
ImageMover: mover,
ChartImporter: importer,
TmpArtifactsFolder: downloadFolder,
Bundles: bundles,
FileImporter: fileImporter,
},
bundles: bundles,
fileImporter: fileImporter,
}
}
func TestImportRun(t *testing.T) {
tt := newImportArtifactsTest(t)
tt.reader.EXPECT().ReadImagesFromBundles(tt.ctx, tt.bundles).Return(tt.images, nil)
tt.mover.EXPECT().Move(tt.ctx, "image1:1", "image2:1")
tt.reader.EXPECT().ReadChartsFromBundles(tt.ctx, tt.bundles).Return(tt.charts)
tt.fileImporter.EXPECT().Push(tt.ctx, tt.bundles)
tt.importer.EXPECT().Import(tt.ctx, "chart:v1.0.0", "package-chart:v1.0.0")
tt.Expect(tt.command.Run(tt.ctx)).To(Succeed())
}
| 99 |
eks-anywhere | aws | Go | package artifacts
import (
"context"
"fmt"
"os"
"github.com/aws/eks-anywhere/pkg/logger"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type ImportToolsImage struct {
Bundles *releasev1.Bundles
ImageMover ImageMover
UnPackager UnPackager
InputFile string
TmpArtifactsFolder string
}
type UnPackager interface {
UnPackage(orgFile, dstFolder string) error
}
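// Run unpackages InputFile into TmpArtifactsFolder and imports the EKS-A
// tools image referenced by the Bundles manifest, presumably so the tools
// image is available before the remaining artifacts are imported.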
func (i ImportToolsImage) Run(ctx context.Context) error {
if err := os.MkdirAll(i.TmpArtifactsFolder, os.ModePerm); err != nil {
return fmt.Errorf("creating tmp artifact folder to unpackage tools image: %v", err)
}
logger.Info("Unpackaging artifacts", "dst", i.TmpArtifactsFolder)
if err := i.UnPackager.UnPackage(i.InputFile, i.TmpArtifactsFolder); err != nil {
return err
}
toolsImage := i.Bundles.DefaultEksAToolsImage().VersionedImage()
if err := i.ImageMover.Move(ctx, toolsImage); err != nil {
return fmt.Errorf("importing tools image: %v", err)
}
return nil
}
| 42 |
eks-anywhere | aws | Go | package artifacts_test
import (
"context"
"os"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts/mocks"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type importToolsImageTest struct {
*WithT
ctx context.Context
mover *mocks.MockImageMover
unpackager *mocks.MockUnPackager
command *artifacts.ImportToolsImage
bundles *releasev1.Bundles
}
func newImportToolsImageTest(t *testing.T) *importToolsImageTest {
downloadFolder := "tmp-folder"
t.Cleanup(func() {
os.RemoveAll(downloadFolder)
})
ctrl := gomock.NewController(t)
mover := mocks.NewMockImageMover(ctrl)
unpackager := mocks.NewMockUnPackager(ctrl)
bundles := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
Eksa: releasev1.EksaBundle{
CliTools: releasev1.Image{
URI: "tools:v1.0.0",
},
},
},
},
},
}
return &importToolsImageTest{
WithT: NewWithT(t),
ctx: context.Background(),
mover: mover,
unpackager: unpackager,
command: &artifacts.ImportToolsImage{
ImageMover: mover,
UnPackager: unpackager,
TmpArtifactsFolder: downloadFolder,
Bundles: bundles,
InputFile: "tools_image.tar",
},
bundles: bundles,
}
}
func TestImportToolsImageRun(t *testing.T) {
tt := newImportToolsImageTest(t)
tt.unpackager.EXPECT().UnPackage(tt.command.InputFile, tt.command.TmpArtifactsFolder)
tt.mover.EXPECT().Move(tt.ctx, "tools:v1.0.0")
tt.Expect(tt.command.Run(tt.ctx)).To(Succeed())
}
| 71 |
eks-anywhere | aws | Go | package artifacts
import (
"context"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type Noop struct{}
func (*Noop) Download(ctx context.Context, bundles *releasev1.Bundles) {}
func (*Noop) Push(ctx context.Context, bundles *releasev1.Bundles) {}
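// Noop provides no-op implementations matching the manifest download and
// file import hooks, so it can be plugged in wherever those steps should be
// skipped. A hypothetical wiring (field names taken from the Import command
// in this package):
//
//	cmd := Import{FileImporter: &Noop{} /* plus the other dependencies */}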
| 14 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: cmd/eksctl-anywhere/cmd/internal/commands/artifacts/download.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockReader is a mock of Reader interface.
type MockReader struct {
ctrl *gomock.Controller
recorder *MockReaderMockRecorder
}
// MockReaderMockRecorder is the mock recorder for MockReader.
type MockReaderMockRecorder struct {
mock *MockReader
}
// NewMockReader creates a new mock instance.
func NewMockReader(ctrl *gomock.Controller) *MockReader {
mock := &MockReader{ctrl: ctrl}
mock.recorder = &MockReaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockReader) EXPECT() *MockReaderMockRecorder {
return m.recorder
}
// ReadBundlesForVersion mocks base method.
func (m *MockReader) ReadBundlesForVersion(eksaVersion string) (*v1alpha1.Bundles, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadBundlesForVersion", eksaVersion)
ret0, _ := ret[0].(*v1alpha1.Bundles)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReadBundlesForVersion indicates an expected call of ReadBundlesForVersion.
func (mr *MockReaderMockRecorder) ReadBundlesForVersion(eksaVersion interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadBundlesForVersion", reflect.TypeOf((*MockReader)(nil).ReadBundlesForVersion), eksaVersion)
}
// ReadChartsFromBundles mocks base method.
func (m *MockReader) ReadChartsFromBundles(ctx context.Context, bundles *v1alpha1.Bundles) []v1alpha1.Image {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadChartsFromBundles", ctx, bundles)
ret0, _ := ret[0].([]v1alpha1.Image)
return ret0
}
// ReadChartsFromBundles indicates an expected call of ReadChartsFromBundles.
func (mr *MockReaderMockRecorder) ReadChartsFromBundles(ctx, bundles interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadChartsFromBundles", reflect.TypeOf((*MockReader)(nil).ReadChartsFromBundles), ctx, bundles)
}
// ReadImagesFromBundles mocks base method.
func (m *MockReader) ReadImagesFromBundles(ctx context.Context, bundles *v1alpha1.Bundles) ([]v1alpha1.Image, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadImagesFromBundles", ctx, bundles)
ret0, _ := ret[0].([]v1alpha1.Image)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReadImagesFromBundles indicates an expected call of ReadImagesFromBundles.
func (mr *MockReaderMockRecorder) ReadImagesFromBundles(ctx, bundles interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadImagesFromBundles", reflect.TypeOf((*MockReader)(nil).ReadImagesFromBundles), ctx, bundles)
}
// MockImageMover is a mock of ImageMover interface.
type MockImageMover struct {
ctrl *gomock.Controller
recorder *MockImageMoverMockRecorder
}
// MockImageMoverMockRecorder is the mock recorder for MockImageMover.
type MockImageMoverMockRecorder struct {
mock *MockImageMover
}
// NewMockImageMover creates a new mock instance.
func NewMockImageMover(ctrl *gomock.Controller) *MockImageMover {
mock := &MockImageMover{ctrl: ctrl}
mock.recorder = &MockImageMoverMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageMover) EXPECT() *MockImageMoverMockRecorder {
return m.recorder
}
// Move mocks base method.
func (m *MockImageMover) Move(ctx context.Context, artifacts ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range artifacts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Move", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Move indicates an expected call of Move.
func (mr *MockImageMoverMockRecorder) Move(ctx interface{}, artifacts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, artifacts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Move", reflect.TypeOf((*MockImageMover)(nil).Move), varargs...)
}
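// For variadic methods like Move, expectations can pin the exact argument
// list or match loosely; gomock matches a single trailing gomock.Any()
// against the whole variadic slice. Hypothetical usage:
//
//	mover.EXPECT().Move(gomock.Any(), "image1:1", "image2:1")
//	mover.EXPECT().Move(gomock.Any(), gomock.Any()).AnyTimes()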
// MockChartDownloader is a mock of ChartDownloader interface.
type MockChartDownloader struct {
ctrl *gomock.Controller
recorder *MockChartDownloaderMockRecorder
}
// MockChartDownloaderMockRecorder is the mock recorder for MockChartDownloader.
type MockChartDownloaderMockRecorder struct {
mock *MockChartDownloader
}
// NewMockChartDownloader creates a new mock instance.
func NewMockChartDownloader(ctrl *gomock.Controller) *MockChartDownloader {
mock := &MockChartDownloader{ctrl: ctrl}
mock.recorder = &MockChartDownloaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockChartDownloader) EXPECT() *MockChartDownloaderMockRecorder {
return m.recorder
}
// Download mocks base method.
func (m *MockChartDownloader) Download(ctx context.Context, artifacts ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range artifacts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Download", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Download indicates an expected call of Download.
func (mr *MockChartDownloaderMockRecorder) Download(ctx interface{}, artifacts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, artifacts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockChartDownloader)(nil).Download), varargs...)
}
// MockManifestDownloader is a mock of ManifestDownloader interface.
type MockManifestDownloader struct {
ctrl *gomock.Controller
recorder *MockManifestDownloaderMockRecorder
}
// MockManifestDownloaderMockRecorder is the mock recorder for MockManifestDownloader.
type MockManifestDownloaderMockRecorder struct {
mock *MockManifestDownloader
}
// NewMockManifestDownloader creates a new mock instance.
func NewMockManifestDownloader(ctrl *gomock.Controller) *MockManifestDownloader {
mock := &MockManifestDownloader{ctrl: ctrl}
mock.recorder = &MockManifestDownloaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManifestDownloader) EXPECT() *MockManifestDownloaderMockRecorder {
return m.recorder
}
// Download mocks base method.
func (m *MockManifestDownloader) Download(ctx context.Context, bundles *v1alpha1.Bundles) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Download", ctx, bundles)
}
// Download indicates an expected call of Download.
func (mr *MockManifestDownloaderMockRecorder) Download(ctx, bundles interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockManifestDownloader)(nil).Download), ctx, bundles)
}
// MockPackager is a mock of Packager interface.
type MockPackager struct {
ctrl *gomock.Controller
recorder *MockPackagerMockRecorder
}
// MockPackagerMockRecorder is the mock recorder for MockPackager.
type MockPackagerMockRecorder struct {
mock *MockPackager
}
// NewMockPackager creates a new mock instance.
func NewMockPackager(ctrl *gomock.Controller) *MockPackager {
mock := &MockPackager{ctrl: ctrl}
mock.recorder = &MockPackagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPackager) EXPECT() *MockPackagerMockRecorder {
return m.recorder
}
// Package mocks base method.
func (m *MockPackager) Package(folder, dstFile string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Package", folder, dstFile)
ret0, _ := ret[0].(error)
return ret0
}
// Package indicates an expected call of Package.
func (mr *MockPackagerMockRecorder) Package(folder, dstFile interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Package", reflect.TypeOf((*MockPackager)(nil).Package), folder, dstFile)
}
| 237 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: cmd/eksctl-anywhere/cmd/internal/commands/artifacts/import.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockChartImporter is a mock of ChartImporter interface.
type MockChartImporter struct {
ctrl *gomock.Controller
recorder *MockChartImporterMockRecorder
}
// MockChartImporterMockRecorder is the mock recorder for MockChartImporter.
type MockChartImporterMockRecorder struct {
mock *MockChartImporter
}
// NewMockChartImporter creates a new mock instance.
func NewMockChartImporter(ctrl *gomock.Controller) *MockChartImporter {
mock := &MockChartImporter{ctrl: ctrl}
mock.recorder = &MockChartImporterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockChartImporter) EXPECT() *MockChartImporterMockRecorder {
return m.recorder
}
// Import mocks base method.
func (m *MockChartImporter) Import(ctx context.Context, charts ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range charts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Import", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Import indicates an expected call of Import.
func (mr *MockChartImporterMockRecorder) Import(ctx interface{}, charts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, charts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Import", reflect.TypeOf((*MockChartImporter)(nil).Import), varargs...)
}
// MockFileImporter is a mock of FileImporter interface.
type MockFileImporter struct {
ctrl *gomock.Controller
recorder *MockFileImporterMockRecorder
}
// MockFileImporterMockRecorder is the mock recorder for MockFileImporter.
type MockFileImporterMockRecorder struct {
mock *MockFileImporter
}
// NewMockFileImporter creates a new mock instance.
func NewMockFileImporter(ctrl *gomock.Controller) *MockFileImporter {
mock := &MockFileImporter{ctrl: ctrl}
mock.recorder = &MockFileImporterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFileImporter) EXPECT() *MockFileImporterMockRecorder {
return m.recorder
}
// Push mocks base method.
func (m *MockFileImporter) Push(ctx context.Context, bundles *v1alpha1.Bundles) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Push", ctx, bundles)
}
// Push indicates an expected call of Push.
func (mr *MockFileImporterMockRecorder) Push(ctx, bundles interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockFileImporter)(nil).Push), ctx, bundles)
}
| 91 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: cmd/eksctl-anywhere/cmd/internal/commands/artifacts/import_tools_image.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockUnPackager is a mock of UnPackager interface.
type MockUnPackager struct {
ctrl *gomock.Controller
recorder *MockUnPackagerMockRecorder
}
// MockUnPackagerMockRecorder is the mock recorder for MockUnPackager.
type MockUnPackagerMockRecorder struct {
mock *MockUnPackager
}
// NewMockUnPackager creates a new mock instance.
func NewMockUnPackager(ctrl *gomock.Controller) *MockUnPackager {
mock := &MockUnPackager{ctrl: ctrl}
mock.recorder = &MockUnPackagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUnPackager) EXPECT() *MockUnPackagerMockRecorder {
return m.recorder
}
// UnPackage mocks base method.
func (m *MockUnPackager) UnPackage(orgFile, dstFolder string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UnPackage", orgFile, dstFolder)
ret0, _ := ret[0].(error)
return ret0
}
// UnPackage indicates an expected call of UnPackage.
func (mr *MockUnPackagerMockRecorder) UnPackage(orgFile, dstFolder interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnPackage", reflect.TypeOf((*MockUnPackager)(nil).UnPackage), orgFile, dstFolder)
}
| 49 |
eks-anywhere | aws | Go | package main
import (
"os"
"github.com/aws/eks-anywhere/cmd/integration_test/cmd"
)
func main() {
if cmd.Execute() == nil {
os.Exit(0)
}
os.Exit(-1)
}
| 15 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var cleanUpInstancesCmd = &cobra.Command{
Use: "cleanup",
Short: "Clean up e2e resources",
Long: "Clean up resources created for e2e testing",
}
func init() {
integrationTestCmd.AddCommand(cleanUpInstancesCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
maxAgeFlagName = "max-age"
tagFlagName = "tag"
)
var cleanUpAwsCmd = &cobra.Command{
Use: "aws",
Short: "Clean up e2e resources on aws",
Long: "Clean up resources created for e2e testing on aws",
SilenceUsage: true,
PreRun: preRunCleanUpAwsSetup,
RunE: func(cmd *cobra.Command, args []string) error {
err := cleanUpAwsTestResources(cmd.Context())
if err != nil {
logger.Fatal(err, "Failed to cleanup e2e resources on aws")
}
return nil
},
}
func preRunCleanUpAwsSetup(cmd *cobra.Command, args []string) {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
}
var requiredAwsCleanUpFlags = []string{storageBucketFlagName, maxAgeFlagName, tagFlagName}
func init() {
cleanUpInstancesCmd.AddCommand(cleanUpAwsCmd)
cleanUpAwsCmd.Flags().StringP(storageBucketFlagName, "s", "", "Name of s3 bucket used for e2e testing")
cleanUpAwsCmd.Flags().StringP(maxAgeFlagName, "a", "0", "Instance age in seconds after which it should be deleted")
cleanUpAwsCmd.Flags().StringP(tagFlagName, "t", "", "EC2 instance tag")
for _, flag := range requiredAwsCleanUpFlags {
if err := cleanUpAwsCmd.MarkFlagRequired(flag); err != nil {
log.Fatalf("Error marking flag %s as required: %v", flag, err)
}
}
}
func cleanUpAwsTestResources(ctx context.Context) error {
maxAge := viper.GetString(maxAgeFlagName)
storageBucket := viper.GetString(storageBucketFlagName)
tag := viper.GetString(tagFlagName)
err := cleanup.CleanUpAwsTestResources(storageBucket, maxAge, tag)
if err != nil {
return fmt.Errorf("running cleanup for aws test resources: %v", err)
}
return nil
}
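// Example invocation (hypothetical values): clean up e2e resources tagged
// "Integration" that are older than 24 hours (86400 seconds):
//
//	integration_test cleanup aws -s my-e2e-bucket -a 86400 -t Integration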
| 72 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/logger"
)
var cleanUpCloudstackCmd = &cobra.Command{
Use: "cloudstack",
Short: "Clean up e2e vms on cloudstack",
Long: "Clean up vms created for e2e testing on cloudstack",
SilenceUsage: true,
PreRun: preRunCleanUpCloudstackSetup,
RunE: func(cmd *cobra.Command, args []string) error {
err := cleanUpCloudstackTestResources(cmd.Context())
if err != nil {
logger.Fatal(err, "Failed to cleanup e2e vms on cloudstack")
}
return nil
},
}
func preRunCleanUpCloudstackSetup(cmd *cobra.Command, args []string) {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
}
var requiredCloudstackCleanUpFlags = []string{clusterNameFlagName}
func init() {
cleanUpInstancesCmd.AddCommand(cleanUpCloudstackCmd)
cleanUpCloudstackCmd.Flags().StringP(clusterNameFlagName, "n", "", "Cluster name for associated vms")
for _, flag := range requiredCloudstackCleanUpFlags {
if err := cleanUpCloudstackCmd.MarkFlagRequired(flag); err != nil {
log.Fatalf("Error marking flag %s as required: %v", flag, err)
}
}
}
func cleanUpCloudstackTestResources(ctx context.Context) error {
clusterName := viper.GetString(clusterNameFlagName)
err := cleanup.CleanUpCloudstackTestResources(ctx, clusterName, false)
if err != nil {
return fmt.Errorf("running cleanup for cloudstack vms: %v", err)
}
return nil
}
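// Example invocation (hypothetical cluster name):
//
//	integration_test cleanup cloudstack -n e2e-test-cluster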
| 62 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
clusterNameFlagName = "cluster-name"
)
var cleanUpVsphereCmd = &cobra.Command{
Use: "vsphere",
Short: "Clean up e2e vms on vsphere vcenter",
Long: "Clean up vms created for e2e testing on vsphere vcenter",
SilenceUsage: true,
PreRun: preRunCleanUpVsphereSetup,
RunE: func(cmd *cobra.Command, args []string) error {
err := cleanUpVsphereTestResources(cmd.Context())
if err != nil {
logger.Fatal(err, "Failed to cleanup e2e vms on vsphere vcenter")
}
return nil
},
}
func preRunCleanUpVsphereSetup(cmd *cobra.Command, args []string) {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
}
var requiredVsphereCleanUpFlags = []string{clusterNameFlagName}
func init() {
cleanUpInstancesCmd.AddCommand(cleanUpVsphereCmd)
cleanUpVsphereCmd.Flags().StringP(clusterNameFlagName, "n", "", "Cluster name for associated vms")
for _, flag := range requiredVsphereCleanUpFlags {
if err := cleanUpVsphereCmd.MarkFlagRequired(flag); err != nil {
log.Fatalf("Error marking flag %s as required: %v", flag, err)
}
}
}
func cleanUpVsphereTestResources(ctx context.Context) error {
clusterName := viper.GetString(clusterNameFlagName)
err := cleanup.CleanUpVsphereTestResources(ctx, clusterName)
if err != nil {
return fmt.Errorf("running cleanup for vsphere vcenter vms: %v", err)
}
return nil
}
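// Example invocation (hypothetical cluster name):
//
//	integration_test cleanup vsphere -n e2e-test-cluster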
| 66 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/logger"
)
var rootCmd = &cobra.Command{
Use: "integration_test",
Short: "Integration test",
Long: `Run integration test`,
PersistentPreRun: rootPersistentPreRun,
}
func init() {
rootCmd.PersistentFlags().IntP("verbosity", "v", 0, "Set the log level verbosity")
if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil {
log.Fatalf("failed to bind flags for root: %v", err)
}
}
func rootPersistentPreRun(cmd *cobra.Command, args []string) {
if err := initLogger(); err != nil {
log.Fatal(err)
}
}
func initLogger() error {
if err := logger.Init(logger.Options{
Level: viper.GetInt("verbosity"),
}); err != nil {
return fmt.Errorf("failed init zap logger in root command: %v", err)
}
return nil
}
func Execute() error {
return rootCmd.Execute()
}
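// The verbosity flag is registered on the root command's persistent flags,
// so it is inherited by every subcommand; a hypothetical invocation such as
// "integration_test -v 4 e2e run ..." raises the log verbosity for the
// whole run.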
| 46 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/test/e2e"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
storageBucketFlagName = "storage-bucket"
jobIdFlagName = "job-id"
instanceProfileFlagName = "instance-profile-name"
regexFlagName = "regex"
maxInstancesFlagName = "max-instances"
maxConcurrentTestsFlagName = "max-concurrent-tests"
skipFlagName = "skip"
bundlesOverrideFlagName = "bundles-override"
cleanupVmsFlagName = "cleanup-vms"
testReportFolderFlagName = "test-report-folder"
branchNameFlagName = "branch-name"
instanceConfigFlagName = "instance-config"
baremetalBranchFlagName = "baremetal-branch"
)
var runE2ECmd = &cobra.Command{
Use: "run",
Short: "Run E2E",
Long: "Run end to end tests",
SilenceUsage: true,
PreRun: preRunSetup,
RunE: func(cmd *cobra.Command, args []string) error {
err := runE2E(cmd.Context())
if err != nil {
logger.Fatal(err, "Failed to run e2e test")
}
return nil
},
}
var requiredFlags = []string{instanceConfigFlagName, storageBucketFlagName, jobIdFlagName, instanceProfileFlagName}
func preRunSetup(cmd *cobra.Command, args []string) {
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
err := viper.BindPFlag(flag.Name, flag)
if err != nil {
log.Fatalf("Error initializing flags: %v", err)
}
})
}
func init() {
integrationTestCmd.AddCommand(runE2ECmd)
runE2ECmd.Flags().StringP(instanceConfigFlagName, "c", "", "File path to the instance-config.yml config")
runE2ECmd.Flags().StringP(storageBucketFlagName, "s", "", "S3 bucket name to store eks-a binary")
runE2ECmd.Flags().StringP(jobIdFlagName, "j", "", "Id of the job being run")
runE2ECmd.Flags().StringP(instanceProfileFlagName, "i", "", "IAM instance profile name to attach to ssm instances")
runE2ECmd.Flags().StringP(regexFlagName, "r", "", "Run only those tests and examples matching the regular expression. Equivalent to go test -run")
runE2ECmd.Flags().IntP(maxInstancesFlagName, "m", 1, "Run tests in parallel on same instance within the max EC2 instance count")
runE2ECmd.Flags().IntP(maxConcurrentTestsFlagName, "p", 1, "Maximum number of parallel tests that can be run at a time")
runE2ECmd.Flags().StringSlice(skipFlagName, nil, "List of tests to skip")
runE2ECmd.Flags().Bool(bundlesOverrideFlagName, false, "Flag to indicate if the tests should run with a bundles override")
runE2ECmd.Flags().Bool(cleanupVmsFlagName, false, "Flag to indicate if VSphere VMs should be cleaned up automatically as tests complete")
runE2ECmd.Flags().String(testReportFolderFlagName, "", "Folder destination for JUnit tests reports")
runE2ECmd.Flags().String(branchNameFlagName, "main", "EKS-A origin branch from where the tests are being run")
runE2ECmd.Flags().String(baremetalBranchFlagName, "main", "Branch for baremetal tests to run on")
for _, flag := range requiredFlags {
if err := runE2ECmd.MarkFlagRequired(flag); err != nil {
log.Fatalf("Error marking flag %s as required: %v", flag, err)
}
}
}
func runE2E(ctx context.Context) error {
instanceConfigFile := viper.GetString(instanceConfigFlagName)
storageBucket := viper.GetString(storageBucketFlagName)
jobId := viper.GetString(jobIdFlagName)
instanceProfileName := viper.GetString(instanceProfileFlagName)
testRegex := viper.GetString(regexFlagName)
maxInstances := viper.GetInt(maxInstancesFlagName)
maxConcurrentTests := viper.GetInt(maxConcurrentTestsFlagName)
testsToSkip := viper.GetStringSlice(skipFlagName)
bundlesOverride := viper.GetBool(bundlesOverrideFlagName)
cleanupVms := viper.GetBool(cleanupVmsFlagName)
testReportFolder := viper.GetString(testReportFolderFlagName)
branchName := viper.GetString(branchNameFlagName)
baremetalBranchName := viper.GetString(baremetalBranchFlagName)
runConf := e2e.ParallelRunConf{
MaxInstances: maxInstances,
MaxConcurrentTests: maxConcurrentTests,
InstanceProfileName: instanceProfileName,
StorageBucket: storageBucket,
JobId: jobId,
Regex: testRegex,
TestsToSkip: testsToSkip,
BundlesOverride: bundlesOverride,
CleanupVms: cleanupVms,
TestReportFolder: testReportFolder,
BranchName: branchName,
TestInstanceConfigFile: instanceConfigFile,
BaremetalBranchName: baremetalBranchName,
Logger: logger.Get(),
}
err := e2e.RunTestsInParallel(runConf)
if err != nil {
return fmt.Errorf("running e2e tests: %v", err)
}
return nil
}
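// A hypothetical end-to-end invocation, exercising the required flags plus a
// test filter and parallelism settings (all values are placeholders; only
// -c, -s, -j and -i are required):
//
//	integration_test e2e run \
//		-c instance-config.yml \
//		-s my-e2e-bucket \
//		-j job-1234 \
//		-i e2e-instance-profile \
//		-r 'TestDocker.*' \
//		-p 4 -m 2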
| 120 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
var integrationTestCmd = &cobra.Command{
Use: "e2e",
Short: "Integration test",
Long: "Run integration test on eks-d",
}
func init() {
rootCmd.AddCommand(integrationTestCmd)
}
| 16 |
eks-anywhere | aws | Go | package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)
// CloudStackDatacenterReconciler reconciles a CloudStackDatacenterConfig object.
type CloudStackDatacenterReconciler struct {
client client.Client
validatorRegistry cloudstack.ValidatorRegistry
}
// NewCloudStackDatacenterReconciler creates a new instance of the CloudStackDatacenterReconciler struct.
func NewCloudStackDatacenterReconciler(client client.Client, validatorRegistry cloudstack.ValidatorRegistry) *CloudStackDatacenterReconciler {
return &CloudStackDatacenterReconciler{
client: client,
validatorRegistry: validatorRegistry,
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *CloudStackDatacenterReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&anywherev1.CloudStackDatacenterConfig{}).
Complete(r)
}
// Reconcile implements the reconcile.Reconciler interface.
func (r *CloudStackDatacenterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
log := ctrl.LoggerFrom(ctx)
// Fetch the CloudStackDatacenter object
cloudstackDatacenter := &anywherev1.CloudStackDatacenterConfig{}
if err := r.client.Get(ctx, req.NamespacedName, cloudstackDatacenter); err != nil {
return ctrl.Result{}, fmt.Errorf("failed getting cloudstack datacenter config: %v", err)
}
// Initialize the patch helper
patchHelper, err := patch.NewHelper(cloudstackDatacenter, r.client)
if err != nil {
return ctrl.Result{}, err
}
defer func() {
// Always attempt to patch the object and status after each reconciliation.
patchOpts := []patch.Option{}
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
}
if err := patchHelper.Patch(ctx, cloudstackDatacenter, patchOpts...); err != nil {
log.Error(err, "patching CloudStackDatacenterConfig")
reterr = kerrors.NewAggregate([]error{reterr, err})
}
}()
// There's no need to go any further if the cloudstackDatacenter is marked for deletion.
if !cloudstackDatacenter.DeletionTimestamp.IsZero() {
return ctrl.Result{}, reterr
}
result, err := r.reconcile(ctx, cloudstackDatacenter, log)
if err != nil {
log.Error(err, "reconciling CloudStackDatacenterConfig")
}
return result, err
}
func (r *CloudStackDatacenterReconciler) reconcile(ctx context.Context, cloudstackDatacenterConfig *anywherev1.CloudStackDatacenterConfig, log logr.Logger) (_ ctrl.Result, reterr error) {
cloudstackDatacenterConfig.SetDefaults()
execConfig, err := cloudstack.GetCloudstackExecConfig(ctx, r.client, cloudstackDatacenterConfig)
if err != nil {
return ctrl.Result{}, err
}
validator, err := r.validatorRegistry.Get(execConfig)
if err != nil {
return ctrl.Result{}, err
}
// Run validations with the retrieved validator; the registry's Get constructs a new CMK executable on each call, so reuse this instance.
if err := validator.ValidateCloudStackDatacenterConfig(ctx, cloudstackDatacenterConfig); err != nil {
log.Error(err, "validating CloudStackDatacenterConfig")
return ctrl.Result{}, err
}
cloudstackDatacenterConfig.Status.SpecValid = true
return ctrl.Result{}, nil
}
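// A sketch of how this reconciler might be wired into a manager at startup
// (hypothetical; cfg is a *rest.Config and validatorRegistry construction is
// elided):
//
//	mgr, _ := ctrl.NewManager(cfg, ctrl.Options{})
//	r := NewCloudStackDatacenterReconciler(mgr.GetClient(), validatorRegistry)
//	if err := r.SetupWithManager(mgr); err != nil {
//		// handle setup error
//	}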
| 99 |
eks-anywhere | aws | Go | package controllers_test
import (
"context"
"errors"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/controllers"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)
func TestCloudStackDatacenterReconcilerSetupWithManager(t *testing.T) {
client := env.Client()
r := controllers.NewCloudStackDatacenterReconciler(client, nil)
g := NewWithT(t)
g.Expect(r.SetupWithManager(env.Manager())).To(Succeed())
}
func TestCloudStackDatacenterReconcilerSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
secrets := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "testCred",
Namespace: constants.EksaSystemNamespace,
},
Data: map[string][]byte{
decoder.APIKeyKey: []byte("test-key1"),
decoder.APIUrlKey: []byte("http://1.1.1.1:8080/client/api"),
decoder.SecretKeyKey: []byte("test-secret1"),
},
}
objs := []runtime.Object{dcConfig, secrets}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
ctrl := gomock.NewController(t)
validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl)
execConfig := &decoder.CloudStackExecConfig{
Profiles: []decoder.CloudStackProfileConfig{
{
Name: "testCred",
ApiKey: "test-key1",
SecretKey: "test-secret1",
ManagementUrl: "http://1.1.1.1:8080/client/api",
},
},
}
validator := cloudstack.NewMockProviderValidator(ctrl)
validatorRegistry.EXPECT().Get(execConfig).Return(validator, nil).Times(1)
validator.EXPECT().ValidateCloudStackDatacenterConfig(ctx, dcConfig).Times(1)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, validatorRegistry)
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
}
func TestCloudStackDatacenterReconcilerSetDefaultSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
dcConfig.Spec.AvailabilityZones = nil
dcConfig.Spec.Zones = []anywherev1.CloudStackZone{
{
Id: "",
Name: "",
Network: anywherev1.CloudStackResourceIdentifier{},
},
}
secrets := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "global",
Namespace: constants.EksaSystemNamespace,
},
Data: map[string][]byte{
decoder.APIKeyKey: []byte("test-key1"),
decoder.APIUrlKey: []byte("http://1.1.1.1:8080/client/api"),
decoder.SecretKeyKey: []byte("test-secret1"),
},
}
objs := []runtime.Object{dcConfig, secrets}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
ctrl := gomock.NewController(t)
validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl)
execConfig := &decoder.CloudStackExecConfig{
Profiles: []decoder.CloudStackProfileConfig{
{
Name: "global",
ApiKey: "test-key1",
SecretKey: "test-secret1",
ManagementUrl: "http://1.1.1.1:8080/client/api",
},
},
}
validator := cloudstack.NewMockProviderValidator(ctrl)
validatorRegistry.EXPECT().Get(execConfig).Return(validator, nil).Times(1)
az := anywherev1.CloudStackAvailabilityZone{
Name: anywherev1.DefaultCloudStackAZPrefix + "-0",
CredentialsRef: "global",
Zone: dcConfig.Spec.Zones[0],
}
dcConfig.Spec.AvailabilityZones = append(dcConfig.Spec.AvailabilityZones, az)
dcConfig.Spec.Zones = nil
validator.EXPECT().ValidateCloudStackDatacenterConfig(ctx, dcConfig).Times(1)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, validatorRegistry)
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
getDcConfig := &anywherev1.CloudStackDatacenterConfig{}
err = client.Get(ctx, req.NamespacedName, getDcConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(getDcConfig.Spec.AvailabilityZones)).ToNot(Equal(0))
g.Expect(getDcConfig.Spec.AvailabilityZones[0].Name).To(Equal(anywherev1.DefaultCloudStackAZPrefix + "-0"))
}
func TestCloudstackDatacenterConfigReconcilerDelete(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
dcConfig.DeletionTimestamp = &metav1.Time{Time: time.Now()}
objs := []runtime.Object{dcConfig}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
ctrl := gomock.NewController(t)
validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, validatorRegistry)
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
}
func TestCloudstackDatacenterConfigGetValidatorFailure(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
dcConfig.Spec.AvailabilityZones = nil
objs := []runtime.Object{dcConfig}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
ctrl := gomock.NewController(t)
validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl)
execConfig := &decoder.CloudStackExecConfig{}
errMsg := "building cmk executable: nil exec config for CloudMonkey, unable to proceed"
validatorRegistry.EXPECT().Get(execConfig).Return(nil, errors.New(errMsg)).Times(1)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, validatorRegistry)
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(MatchError(ContainSubstring(errMsg)))
}
func TestCloudstackDatacenterConfigGetDatacenterFailure(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
ctrl := gomock.NewController(t)
validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, validatorRegistry)
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(MatchError(ContainSubstring("failed getting cloudstack datacenter config")))
}
func TestCloudstackDatacenterConfigGetExecConfigFailure(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
objs := []runtime.Object{dcConfig}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, nil)
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(MatchError(ContainSubstring("secrets \"testCred\" not found")))
gotDatacenterConfig := &anywherev1.CloudStackDatacenterConfig{}
err = client.Get(ctx, req.NamespacedName, gotDatacenterConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotDatacenterConfig.Status.SpecValid).To(BeFalse())
}
func TestCloudstackDatacenterConfigAccountNotPresentFailure(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
dcConfig := createCloudstackDatacenterConfig()
secrets := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "testCred",
Namespace: constants.EksaSystemNamespace,
},
Data: map[string][]byte{
decoder.APIKeyKey: []byte("test-key1"),
decoder.APIUrlKey: []byte("http://1.1.1.1:8080/client/api"),
decoder.SecretKeyKey: []byte("test-secret1"),
},
}
objs := []runtime.Object{dcConfig, secrets}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
ctrl := gomock.NewController(t)
validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl)
execConfig := &decoder.CloudStackExecConfig{
Profiles: []decoder.CloudStackProfileConfig{
{
Name: "testCred",
ApiKey: "test-key1",
SecretKey: "test-secret1",
ManagementUrl: "http://1.1.1.1:8080/client/api",
},
},
}
validator := cloudstack.NewMockProviderValidator(ctrl)
validatorRegistry.EXPECT().Get(execConfig).Return(validator, nil).Times(1)
validator.EXPECT().ValidateCloudStackDatacenterConfig(ctx, dcConfig).Return(errors.New("test error")).Times(1)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
r := controllers.NewCloudStackDatacenterReconciler(client, validatorRegistry)
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(MatchError(ContainSubstring("test error")))
gotDatacenterConfig := &anywherev1.CloudStackDatacenterConfig{}
err = client.Get(ctx, req.NamespacedName, gotDatacenterConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotDatacenterConfig.Status.SpecValid).To(BeFalse())
}
func createCloudstackDatacenterConfig() *anywherev1.CloudStackDatacenterConfig {
return &anywherev1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.CloudStackDatacenterKind,
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: anywherev1.CloudStackDatacenterConfigSpec{
AvailabilityZones: []anywherev1.CloudStackAvailabilityZone{
{
Name: "testAz",
CredentialsRef: "testCred",
Zone: anywherev1.CloudStackZone{
Name: "zone1",
Network: anywherev1.CloudStackResourceIdentifier{
Name: "SharedNet1",
},
},
Domain: "testDomain",
Account: "testAccount",
ManagementApiEndpoint: "testApiEndpoint",
},
},
},
}
}
| 326 |
eks-anywhere | aws | Go | package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/controller/handlers"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
defaultRequeueTime = time.Minute
// ClusterFinalizerName is the finalizer added to clusters to handle deletion.
ClusterFinalizerName = "clusters.anywhere.eks.amazonaws.com/finalizer"
)
// ClusterReconciler reconciles a Cluster object.
type ClusterReconciler struct {
client client.Client
providerReconcilerRegistry ProviderClusterReconcilerRegistry
awsIamAuth AWSIamConfigReconciler
clusterValidator ClusterValidator
packagesClient PackagesClient
// experimentalSelfManagedUpgrade enables full upgrades for management clusters.
// By default, the controller only reconciles the worker nodes of a management cluster.
// When this is enabled, the controller will handle management clusters in the same
// way as workload clusters: it will reconcile CP, etcd and workers.
// Only intended for internal testing.
experimentalSelfManagedUpgrade bool
}
// PackagesClient handles curated packages operations from within the cluster
// controller.
type PackagesClient interface {
EnableFullLifecycle(ctx context.Context, log logr.Logger, clusterName, kubeConfig string, chart *v1alpha1.Image, registry *registrymirror.RegistryMirror, options ...curatedpackages.PackageControllerClientOpt) error
ReconcileDelete(context.Context, logr.Logger, curatedpackages.KubeDeleter, *anywherev1.Cluster) error
Reconcile(context.Context, logr.Logger, client.Client, *anywherev1.Cluster) error
}
type ProviderClusterReconcilerRegistry interface {
Get(datacenterKind string) clusters.ProviderClusterReconciler
}
// AWSIamConfigReconciler manages aws-iam-authenticator installation and configuration for an eks-a cluster.
type AWSIamConfigReconciler interface {
EnsureCASecret(ctx context.Context, logger logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error)
Reconcile(ctx context.Context, logger logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error)
ReconcileDelete(ctx context.Context, logger logr.Logger, cluster *anywherev1.Cluster) error
}
// ClusterValidator runs cluster-level preflight validations before the cluster reaches the provider reconciler.
type ClusterValidator interface {
ValidateManagementClusterName(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) error
}
// ClusterReconcilerOption allows configuring the ClusterReconciler.
type ClusterReconcilerOption func(*ClusterReconciler)
// NewClusterReconciler constructs a new ClusterReconciler.
func NewClusterReconciler(client client.Client, registry ProviderClusterReconcilerRegistry, awsIamAuth AWSIamConfigReconciler, clusterValidator ClusterValidator, pkgs PackagesClient, opts ...ClusterReconcilerOption) *ClusterReconciler {
c := &ClusterReconciler{
client: client,
providerReconcilerRegistry: registry,
awsIamAuth: awsIamAuth,
clusterValidator: clusterValidator,
packagesClient: pkgs,
}
for _, opt := range opts {
opt(c)
}
return c
}
// WithExperimentalSelfManagedClusterUpgrades enables experimental upgrades for
// self-managed clusters.
func WithExperimentalSelfManagedClusterUpgrades(exp bool) ClusterReconcilerOption {
return func(c *ClusterReconciler) {
c.experimentalSelfManagedUpgrade = exp
}
}
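// Hypothetical construction showing how options are threaded through
// (dependency names are placeholders):
//
//	r := NewClusterReconciler(c, registry, iamReconciler, validator, pkgs,
//		WithExperimentalSelfManagedClusterUpgrades(true))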
// SetupWithManager sets up the controller with the Manager.
func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager, log logr.Logger) error {
childObjectHandler := handlers.ChildObjectToClusters(log)
return ctrl.NewControllerManagedBy(mgr).
For(&anywherev1.Cluster{}).
Watches(
&source.Kind{Type: &anywherev1.OIDCConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.AWSIamConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.GitOpsConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.FluxConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.VSphereDatacenterConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.VSphereMachineConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.SnowDatacenterConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.SnowMachineConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.TinkerbellDatacenterConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.TinkerbellMachineConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.DockerDatacenterConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.CloudStackDatacenterConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.CloudStackMachineConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.NutanixDatacenterConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Watches(
&source.Kind{Type: &anywherev1.NutanixMachineConfig{}},
handler.EnqueueRequestsFromMapFunc(childObjectHandler),
).
Complete(r)
}
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch;update
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;delete
// +kubebuilder:rbac:groups="",namespace=eksa-system,resources=secrets,verbs=patch;update
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=create;delete
// +kubebuilder:rbac:groups="",resources=nodes,verbs=list
// +kubebuilder:rbac:groups=addons.cluster.x-k8s.io,resources=clusterresourcesets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters;gitopsconfigs;snowmachineconfigs;snowdatacenterconfigs;snowippools;vspheredatacenterconfigs;vspheremachineconfigs;dockerdatacenterconfigs;tinkerbellmachineconfigs;tinkerbelldatacenterconfigs;cloudstackdatacenterconfigs;cloudstackmachineconfigs;nutanixdatacenterconfigs;nutanixmachineconfigs;awsiamconfigs;oidcconfigs;fluxconfigs,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/status;snowmachineconfigs/status;snowippools/status;vspheredatacenterconfigs/status;vspheremachineconfigs/status;dockerdatacenterconfigs/status;tinkerbelldatacenterconfigs/status;tinkerbellmachineconfigs/status;cloudstackdatacenterconfigs/status;cloudstackmachineconfigs/status;awsiamconfigs/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=bundles,verbs=get;list;watch
// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/finalizers;snowmachineconfigs/finalizers;snowippools/finalizers;vspheredatacenterconfigs/finalizers;vspheremachineconfigs/finalizers;cloudstackdatacenterconfigs/finalizers;cloudstackmachineconfigs/finalizers;dockerdatacenterconfigs/finalizers;bundles/finalizers;awsiamconfigs/finalizers;tinkerbelldatacenterconfigs/finalizers;tinkerbellmachineconfigs/finalizers,verbs=update
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,verbs=create;get;list;patch;update;watch
// +kubebuilder:rbac:groups="cluster.x-k8s.io",resources=machinedeployments,verbs=list;watch;get;patch;update;create;delete
// +kubebuilder:rbac:groups="cluster.x-k8s.io",resources=clusters,verbs=list;watch;get;patch;update;create;delete
// +kubebuilder:rbac:groups=clusterctl.cluster.x-k8s.io,resources=providers,verbs=get;list;watch
// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,verbs=list;get;watch;patch;update;create;delete
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=create;get;list;update;watch;delete
// +kubebuilder:rbac:groups=distro.eks.amazonaws.com,resources=releases,verbs=get;list;watch
// +kubebuilder:rbac:groups=etcdcluster.cluster.x-k8s.io,resources=*,verbs=create;get;list;patch;update;watch
// +kubebuilder:rbac:groups=tinkerbell.org,resources=hardware,verbs=list;watch
// +kubebuilder:rbac:groups=bmc.tinkerbell.org,resources=machines,verbs=list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awssnowclusters;awssnowmachinetemplates;awssnowippools;vsphereclusters;vspheremachinetemplates;dockerclusters;dockermachinetemplates;tinkerbellclusters;tinkerbellmachinetemplates;cloudstackclusters;cloudstackmachinetemplates;nutanixclusters;nutanixmachinetemplates,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=packages.eks.amazonaws.com,resources=packages,verbs=create;delete;get;list;patch;update;watch
// +kubebuilder:rbac:groups=packages.eks.amazonaws.com,namespace=eksa-system,resources=packagebundlecontrollers,verbs=delete
// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,namespace=eksa-system,resources=eksareleases,verbs=get;list;watch
// Reconcile reconciles a cluster object.
// nolint:gocyclo
// TODO: Reduce high cyclomatic complexity. https://github.com/aws/eks-anywhere-internal/issues/1449
func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) {
log := ctrl.LoggerFrom(ctx)
// Fetch the Cluster object
cluster := &anywherev1.Cluster{}
log.Info("Reconciling cluster")
if err := r.client.Get(ctx, req.NamespacedName, cluster); err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return ctrl.Result{}, err
}
// Initialize the patch helper
patchHelper, err := patch.NewHelper(cluster, r.client)
if err != nil {
return ctrl.Result{}, err
}
defer func() {
err := r.updateStatus(ctx, log, cluster)
if err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
}
// Always attempt to patch the object and status after each reconciliation.
patchOpts := []patch.Option{}
// We want the observedGeneration to indicate that the status shown is up-to-date given the desired spec of the same generation.
// However, if there is an error while updating the status, we may get a partial status update. In this case,
// a partially updated status is not considered up to date, so we should not update the observedGeneration.
// Patch ObservedGeneration only if the reconciliation completed without error.
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
}
if err := patchCluster(ctx, patchHelper, cluster, patchOpts...); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
}
// Only requeue if we are not already re-queueing and the Cluster ready condition is false.
// We do this to be able to update the status continuously until the cluster becomes ready,
// since there might be changes in the state of the world that don't trigger reconciliation requests.
if reterr == nil && !result.Requeue && result.RequeueAfter <= 0 && conditions.IsFalse(cluster, anywherev1.ReadyCondition) {
result = ctrl.Result{RequeueAfter: 10 * time.Second}
}
}()
if !cluster.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, log, cluster)
}
// If the cluster is paused, return without any further processing.
if cluster.IsReconcilePaused() {
log.Info("Cluster reconciliation is paused")
return ctrl.Result{}, nil
}
// AddFinalizer is idempotent
controllerutil.AddFinalizer(cluster, ClusterFinalizerName)
if cluster.Spec.BundlesRef == nil {
if err = r.setBundlesRef(ctx, cluster); err != nil {
return ctrl.Result{}, err
}
}
config, err := r.buildClusterConfig(ctx, cluster)
if err != nil {
return ctrl.Result{}, err
}
if err = r.ensureClusterOwnerReferences(ctx, cluster, config); err != nil {
return ctrl.Result{}, err
}
aggregatedGeneration := aggregatedGeneration(config)
// If there is no difference between the aggregated generation and childrenReconciledGeneration,
// and there is no difference in the reconciled generation and .metadata.generation of the cluster,
// then return without any further processing.
if aggregatedGeneration == cluster.Status.ChildrenReconciledGeneration && cluster.Status.ReconciledGeneration == cluster.Generation {
log.Info("Generation and aggregated generation match reconciled generations for cluster and child objects, skipping reconciliation.")
return ctrl.Result{}, nil
}
return r.reconcile(ctx, log, cluster, aggregatedGeneration)
}
func (r *ClusterReconciler) reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster, aggregatedGeneration int64) (ctrl.Result, error) {
clusterProviderReconciler := r.providerReconcilerRegistry.Get(cluster.Spec.DatacenterRef.Kind)
var reconcileResult controller.Result
var err error
reconcileResult, err = r.preClusterProviderReconcile(ctx, log, cluster)
if err != nil {
return ctrl.Result{}, err
}
if reconcileResult.Return() {
return reconcileResult.ToCtrlResult(), nil
}
if cluster.IsSelfManaged() && !r.experimentalSelfManagedUpgrade {
// self-managed clusters should only reconcile worker nodes to avoid control plane instability
reconcileResult, err = clusterProviderReconciler.ReconcileWorkerNodes(ctx, log, cluster)
} else {
reconcileResult, err = clusterProviderReconciler.Reconcile(ctx, log, cluster)
}
if err != nil {
return ctrl.Result{}, err
}
if reconcileResult.Return() {
return reconcileResult.ToCtrlResult(), nil
}
reconcileResult, err = r.postClusterProviderReconcile(ctx, log, cluster)
if err != nil {
return ctrl.Result{}, err
}
if reconcileResult.Return() {
return reconcileResult.ToCtrlResult(), nil
}
// At the end of the reconciliation, if there have been no requeues or errors, we update the cluster's status.
// NOTE: This update must be the last step in the reconciliation process to denote the complete reconciliation.
// No other mutating changes or reconciliations must happen in this loop after this step, so all such changes must
// be placed above this line.
cluster.Status.ReconciledGeneration = cluster.Generation
cluster.Status.ChildrenReconciledGeneration = aggregatedGeneration
return ctrl.Result{}, nil
}
func (r *ClusterReconciler) preClusterProviderReconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
// Run some preflight validations that can't be checked in the webhook.
if cluster.HasAWSIamConfig() {
if result, err := r.awsIamAuth.EnsureCASecret(ctx, log, cluster); err != nil {
return controller.Result{}, err
} else if result.Return() {
return result, nil
}
}
if cluster.IsManaged() {
if err := r.clusterValidator.ValidateManagementClusterName(ctx, log, cluster); err != nil {
cluster.Status.FailureMessage = ptr.String(err.Error())
return controller.Result{}, err
}
}
if cluster.RegistryAuth() {
rUsername, rPassword, err := config.ReadCredentialsFromSecret(ctx, r.client)
if err != nil {
return controller.Result{}, err
}
if err := config.SetCredentialsEnv(rUsername, rPassword); err != nil {
return controller.Result{}, err
}
}
return controller.Result{}, nil
}
func (r *ClusterReconciler) postClusterProviderReconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
if cluster.HasAWSIamConfig() {
if result, err := r.awsIamAuth.Reconcile(ctx, log, cluster); err != nil {
return controller.Result{}, err
} else if result.Return() {
return result, nil
}
}
// Self-managed clusters can support curated packages, but that support
// comes from the CLI at this time.
if cluster.IsManaged() && cluster.IsPackagesEnabled() {
if err := r.packagesClient.Reconcile(ctx, log, r.client, cluster); err != nil {
return controller.Result{}, err
}
}
return controller.Result{}, nil
}
func (r *ClusterReconciler) updateStatus(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) error {
// When the EKS-A cluster is fully deleted, we do not need to update the status. Without this check
// the subsequent patch operations would fail if the status is updated after it is fully deleted.
if !cluster.DeletionTimestamp.IsZero() && len(cluster.GetFinalizers()) == 0 {
log.Info("Cluster is fully deleted, skipping cluster status update")
return nil
}
log.Info("Updating cluster status")
if err := clusters.UpdateClusterStatusForControlPlane(ctx, r.client, cluster); err != nil {
return errors.Wrap(err, "updating status for control plane")
}
if err := clusters.UpdateClusterStatusForWorkers(ctx, r.client, cluster); err != nil {
return errors.Wrap(err, "updating status for workers")
}
clusters.UpdateClusterStatusForCNI(ctx, cluster)
// Always update the readyCondition by summarizing the state of other conditions.
conditions.SetSummary(cluster,
conditions.WithConditions(
anywherev1.ControlPlaneInitializedCondition,
anywherev1.ControlPlaneReadyCondition,
anywherev1.WorkersReadyConditon,
),
)
return nil
}
func (r *ClusterReconciler) reconcileDelete(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (ctrl.Result, error) {
if cluster.IsSelfManaged() {
return ctrl.Result{}, errors.New("deleting self-managed clusters is not supported")
}
if metav1.HasAnnotation(cluster.ObjectMeta, anywherev1.ManagedByCLIAnnotation) {
log.Info("Clusters is managed by CLI, removing finalizer")
controllerutil.RemoveFinalizer(cluster, ClusterFinalizerName)
return ctrl.Result{}, nil
}
if cluster.IsReconcilePaused() {
log.Info("Cluster reconciliation is paused, won't process cluster deletion")
return ctrl.Result{}, nil
}
capiCluster := &clusterv1.Cluster{}
capiClusterName := types.NamespacedName{Namespace: constants.EksaSystemNamespace, Name: cluster.Name}
log.Info("Deleting", "name", cluster.Name)
err := r.client.Get(ctx, capiClusterName, capiCluster)
switch {
case err == nil:
log.Info("Deleting CAPI cluster", "name", capiCluster.Name)
if err := r.client.Delete(ctx, capiCluster); err != nil {
log.Info("Error deleting CAPI cluster", "name", capiCluster.Name)
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: defaultRequeueTime}, nil
case apierrors.IsNotFound(err):
log.Info("Deleting EKS Anywhere cluster", "name", capiCluster.Name, "cluster.DeletionTimestamp", cluster.DeletionTimestamp, "finalizer", cluster.Finalizers)
// TODO delete GitOps,Datacenter and MachineConfig objects
controllerutil.RemoveFinalizer(cluster, ClusterFinalizerName)
default:
return ctrl.Result{}, err
}
if cluster.HasAWSIamConfig() {
if err := r.awsIamAuth.ReconcileDelete(ctx, log, cluster); err != nil {
return ctrl.Result{}, err
}
}
if cluster.IsManaged() {
if err := r.packagesClient.ReconcileDelete(ctx, log, r.client, cluster); err != nil {
return ctrl.Result{}, fmt.Errorf("deleting packages for cluster %q: %w", cluster.Name, err)
}
}
return ctrl.Result{}, nil
}
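// buildClusterConfig fetches the cluster's child objects (datacenter, machine
// configs, identity provider configs) into a cluster.Config, surfacing a
// FailureMessage on the cluster status when a referenced object is missing.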
func (r *ClusterReconciler) buildClusterConfig(ctx context.Context, clus *anywherev1.Cluster) (*cluster.Config, error) {
builder := cluster.NewDefaultConfigClientBuilder()
config, err := builder.Build(ctx, clientutil.NewKubeClient(r.client), clus)
if err != nil {
var notFound apierrors.APIStatus
if apierrors.IsNotFound(err) && errors.As(err, &notFound) {
clus.Status.FailureMessage = ptr.String(fmt.Sprintf("Dependent cluster objects don't exist: %s", notFound))
}
return nil, err
}
return config, nil
}
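// ensureClusterOwnerReferences makes the cluster an owner of each of its child
// objects, issuing an update only for objects that were missing the reference.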
func (r *ClusterReconciler) ensureClusterOwnerReferences(ctx context.Context, clus *anywherev1.Cluster, config *cluster.Config) error {
for _, obj := range config.ChildObjects() {
numberOfOwnerReferences := len(obj.GetOwnerReferences())
if err := controllerutil.SetOwnerReference(clus, obj, r.client.Scheme()); err != nil {
return errors.Wrapf(err, "setting cluster owner reference for %s", obj.GetObjectKind())
}
if numberOfOwnerReferences == len(obj.GetOwnerReferences()) {
// obj already had the owner reference
continue
}
if err := r.client.Update(ctx, obj); err != nil {
return errors.Wrapf(err, "updating object (%s) with cluster owner reference", obj.GetObjectKind())
}
}
return nil
}
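// patchCluster persists spec and status changes through a CAPI patch helper.
// A typical call pattern (a sketch only; the real Reconcile wiring may
// differ) is to create the helper before mutating the object and patch on
// the way out:
//
//	patchHelper, err := patch.NewHelper(cluster, r.client)
//	if err != nil {
//		return ctrl.Result{}, err
//	}
//	defer func() {
//		if err := patchCluster(ctx, patchHelper, cluster); err != nil {
//			log.Error(err, "Patching cluster")
//		}
//	}()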
func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *anywherev1.Cluster, patchOpts ...patch.Option) error {
// Patch the object, ignoring conflicts on the conditions owned by this controller.
options := append([]patch.Option{
patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
// Add each condition here that the controller should ignore conflicts for.
anywherev1.ReadyCondition,
anywherev1.ControlPlaneInitializedCondition,
anywherev1.ControlPlaneReadyCondition,
anywherev1.WorkersReadyConditon,
anywherev1.DefaultCNIConfiguredCondition,
}},
}, patchOpts...)
// Always attempt to patch the object and status after each reconciliation.
return patchHelper.Patch(ctx, cluster, options...)
}
// aggregatedGeneration computes the combined generation of the resources linked
// by the cluster by summing up the .metadata.generation value for all the child
// objects of this cluster.
func aggregatedGeneration(config *cluster.Config) int64 {
var aggregatedGeneration int64
for _, obj := range config.ChildObjects() {
aggregatedGeneration += obj.GetGeneration()
}
return aggregatedGeneration
}
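// For example (mirroring the generations exercised in the tests): a
// datacenter at generation 1, machine configs at generations 2 and 5, an
// OIDC config at 3 and an AWS IAM config at 1 aggregate to 12. Editing any
// child bumps its generation and therefore the sum, which lets the
// reconciler detect child-spec drift. A minimal sketch:
//
//	cfg := &cluster.Config{
//		VSphereDatacenter: &anywherev1.VSphereDatacenterConfig{
//			ObjectMeta: metav1.ObjectMeta{Generation: 1},
//		},
//	}
//	_ = aggregatedGeneration(cfg) // 1

// setBundlesRef copies the BundlesRef from the management cluster onto the
// workload cluster so that both resolve component versions from the same
// Bundles manifest.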
func (r *ClusterReconciler) setBundlesRef(ctx context.Context, clus *anywherev1.Cluster) error {
mgmtCluster := &anywherev1.Cluster{}
if err := r.client.Get(ctx, types.NamespacedName{Name: clus.ManagedBy(), Namespace: clus.Namespace}, mgmtCluster); err != nil {
if apierrors.IsNotFound(err) {
clus.Status.FailureMessage = ptr.String(fmt.Sprintf("Management cluster %s does not exist", clus.Spec.ManagementCluster.Name))
}
return err
}
clus.Spec.BundlesRef = mgmtCluster.Spec.BundlesRef
return nil
}
| 557 |
eks-anywhere | aws | Go | package controllers_test
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/go-logr/logr"
"github.com/go-logr/logr/testr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
apiv1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/controllers"
"github.com/aws/eks-anywhere/controllers/mocks"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
vspheremocks "github.com/aws/eks-anywhere/pkg/providers/vsphere/mocks"
vspherereconciler "github.com/aws/eks-anywhere/pkg/providers/vsphere/reconciler"
vspherereconcilermocks "github.com/aws/eks-anywhere/pkg/providers/vsphere/reconciler/mocks"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
var clusterName = "test-cluster"
var controlPlaneInitializationInProgressReason = "The first control plane instance is not available yet"
type vsphereClusterReconcilerTest struct {
govcClient *vspheremocks.MockProviderGovcClient
reconciler *controllers.ClusterReconciler
client client.Client
}
func testKubeadmControlPlaneFromCluster(cluster *anywherev1.Cluster) *controlplanev1.KubeadmControlPlane {
k := controller.CAPIKubeadmControlPlaneKey(cluster)
return test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Name = k.Name
kcp.Namespace = k.Namespace
expectedReplicas := int32(cluster.Spec.ControlPlaneConfiguration.Count)
kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
Replicas: expectedReplicas,
UpdatedReplicas: expectedReplicas,
ReadyReplicas: expectedReplicas,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
}
})
}
func machineDeploymentsFromCluster(cluster *anywherev1.Cluster) []clusterv1.MachineDeployment {
return []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: cluster.Name,
}
md.Status.Replicas = 1
md.Status.ReadyReplicas = 1
md.Status.UpdatedReplicas = 1
}),
}
}
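// newVsphereClusterReconcilerTest wires a ClusterReconciler to a fake client,
// a real vSphere validator and defaulter backed by a mocked govc client, and
// mocked IAM, cluster-validator and packages dependencies.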
func newVsphereClusterReconcilerTest(t *testing.T, objs ...runtime.Object) *vsphereClusterReconcilerTest {
ctrl := gomock.NewController(t)
govcClient := vspheremocks.NewMockProviderGovcClient(ctrl)
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
iam := mocks.NewMockAWSIamConfigReconciler(ctrl)
clusterValidator := mocks.NewMockClusterValidator(ctrl)
vcb := govmomi.NewVMOMIClientBuilder()
validator := vsphere.NewValidator(govcClient, vcb)
defaulter := vsphere.NewDefaulter(govcClient)
cniReconciler := vspherereconcilermocks.NewMockCNIReconciler(ctrl)
ipValidator := vspherereconcilermocks.NewMockIPValidator(ctrl)
reconciler := vspherereconciler.New(
cl,
validator,
defaulter,
cniReconciler,
nil,
ipValidator,
)
registry := clusters.NewProviderClusterReconcilerRegistryBuilder().
Add(anywherev1.VSphereDatacenterKind, reconciler).
Build()
mockPkgs := mocks.NewMockPackagesClient(ctrl)
mockPkgs.EXPECT().
ReconcileDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil).AnyTimes()
r := controllers.NewClusterReconciler(cl, &registry, iam, clusterValidator, mockPkgs)
return &vsphereClusterReconcilerTest{
govcClient: govcClient,
reconciler: r,
client: cl,
}
}
func TestClusterReconcilerReconcileSelfManagedCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
selfManagedCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
kcp := testKubeadmControlPlaneFromCluster(selfManagedCluster)
controller := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(controller)
iam := mocks.NewMockAWSIamConfigReconciler(controller)
clusterValidator := mocks.NewMockClusterValidator(controller)
registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp).Build()
mockPkgs := mocks.NewMockPackagesClient(controller)
providerReconciler.EXPECT().ReconcileWorkerNodes(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster))
r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, mockPkgs)
result, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster))
g.Expect(err).ToNot(HaveOccurred())
g.Expect(result).To(Equal(ctrl.Result{}))
}
func TestClusterReconcilerReconcileConditions(t *testing.T) {
testCases := []struct {
testName string
skipCNIUpgrade bool
kcpStatus controlplanev1.KubeadmControlPlaneStatus
machineDeploymentStatus clusterv1.MachineDeploymentStatus
result ctrl.Result
wantConditions []anywherev1.Condition
}{
{
testName: "cluster not ready, control plane not initialized",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("False"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{},
wantConditions: []anywherev1.Condition{
*conditions.FalseCondition(anywherev1.ControlPlaneInitializedCondition, anywherev1.ControlPlaneInitializationInProgressReason, clusterv1.ConditionSeverityInfo, controlPlaneInitializationInProgressReason),
*conditions.FalseCondition(anywherev1.ControlPlaneReadyCondition, anywherev1.ControlPlaneInitializationInProgressReason, clusterv1.ConditionSeverityInfo, controlPlaneInitializationInProgressReason),
*conditions.FalseCondition(anywherev1.DefaultCNIConfiguredCondition, anywherev1.ControlPlaneNotReadyReason, clusterv1.ConditionSeverityInfo, ""),
*conditions.FalseCondition(anywherev1.WorkersReadyConditon, anywherev1.ControlPlaneNotInitializedReason, clusterv1.ConditionSeverityInfo, ""),
*conditions.FalseCondition(anywherev1.ReadyCondition, anywherev1.ControlPlaneInitializationInProgressReason, clusterv1.ConditionSeverityInfo, controlPlaneInitializationInProgressReason),
},
result: ctrl.Result{Requeue: false, RequeueAfter: 10 * time.Second},
},
{
testName: "cluster not ready, control plane initialized",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("False"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{},
wantConditions: []anywherev1.Condition{
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
*conditions.FalseCondition(anywherev1.ControlPlaneReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up control plane nodes, 1 expected (0 actual)"),
*conditions.FalseCondition(anywherev1.DefaultCNIConfiguredCondition, anywherev1.ControlPlaneNotReadyReason, clusterv1.ConditionSeverityInfo, ""),
*conditions.FalseCondition(anywherev1.WorkersReadyConditon, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.FalseCondition(anywherev1.ReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up control plane nodes, 1 expected (0 actual)"),
},
result: ctrl.Result{Requeue: false, RequeueAfter: 10 * time.Second},
},
{
testName: "cluster not ready, control plane ready",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{},
wantConditions: []anywherev1.Condition{
*conditions.FalseCondition(anywherev1.ReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.TrueCondition(anywherev1.ControlPlaneReadyCondition),
*conditions.FalseCondition(anywherev1.WorkersReadyConditon, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
},
result: ctrl.Result{Requeue: false, RequeueAfter: 10 * time.Second},
},
{
testName: "cluster ready",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
},
wantConditions: []anywherev1.Condition{
*conditions.TrueCondition(anywherev1.ReadyCondition),
*conditions.TrueCondition(anywherev1.ControlPlaneReadyCondition),
*conditions.TrueCondition(anywherev1.WorkersReadyConditon),
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
},
result: ctrl.Result{},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
config, bundles := baseTestVsphereCluster()
config.Cluster.Name = "test-cluster"
config.Cluster.Generation = 2
config.Cluster.Status.ObservedGeneration = 1
config.Cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
config.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(tt.skipCNIUpgrade)
g := NewWithT(t)
objs := make([]runtime.Object, 0, 4+len(config.ChildObjects()))
kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
k := controller.CAPIKubeadmControlPlaneKey(config.Cluster)
kcp.Name = k.Name
kcp.Namespace = k.Namespace
kcp.Status = tt.kcpStatus
})
md1 := test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: config.Cluster.Name,
}
md.Status = tt.machineDeploymentStatus
})
objs = append(objs, config.Cluster, bundles, kcp, md1)
for _, o := range config.ChildObjects() {
objs = append(objs, o)
}
testClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
mockCtrl := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(mockCtrl)
iam := mocks.NewMockAWSIamConfigReconciler(mockCtrl)
clusterValidator := mocks.NewMockClusterValidator(mockCtrl)
registry := newRegistryMock(providerReconciler)
mockPkgs := mocks.NewMockPackagesClient(mockCtrl)
ctx := context.Background()
log := testr.New(t)
logCtx := ctrl.LoggerInto(ctx, log)
iam.EXPECT().EnsureCASecret(logCtx, log, sameName(config.Cluster)).Return(controller.Result{}, nil)
iam.EXPECT().Reconcile(logCtx, log, sameName(config.Cluster)).Return(controller.Result{}, nil)
providerReconciler.EXPECT().Reconcile(logCtx, log, sameName(config.Cluster)).Times(1)
clusterValidator.EXPECT().ValidateManagementClusterName(logCtx, log, sameName(config.Cluster)).Return(nil)
mockPkgs.EXPECT().Reconcile(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
r := controllers.NewClusterReconciler(testClient, registry, iam, clusterValidator, mockPkgs)
result, err := r.Reconcile(logCtx, clusterRequest(config.Cluster))
g.Expect(err).ToNot(HaveOccurred())
g.Expect(result).To(Equal(tt.result))
api := envtest.NewAPIExpecter(t, testClient)
c := envtest.CloneNameNamespace(config.Cluster)
api.ShouldEventuallyMatch(logCtx, c, func(g Gomega) {
g.Expect(c.Status.ObservedGeneration).To(
Equal(c.Generation), "status generation should have been updated to the metadata generation's value",
)
})
api.ShouldEventuallyMatch(logCtx, c, func(g Gomega) {
for _, wantCondition := range tt.wantConditions {
condition := conditions.Get(c, wantCondition.Type)
g.Expect(condition).ToNot(BeNil())
g.Expect(condition).To((conditions.HaveSameStateOf(&wantCondition)))
}
})
})
}
}
func TestClusterReconcilerReconcileSelfManagedClusterConditions(t *testing.T) {
testCases := []struct {
testName string
skipCNIUpgrade bool
kcpStatus controlplanev1.KubeadmControlPlaneStatus
machineDeploymentStatus clusterv1.MachineDeploymentStatus
result ctrl.Result
wantConditions []anywherev1.Condition
}{
{
testName: "cluster not ready, control plane not ready",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("False"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{},
skipCNIUpgrade: false,
wantConditions: []anywherev1.Condition{
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
*conditions.FalseCondition(anywherev1.ControlPlaneReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up control plane nodes, 1 expected (0 actual)"),
*conditions.FalseCondition(anywherev1.DefaultCNIConfiguredCondition, anywherev1.ControlPlaneNotReadyReason, clusterv1.ConditionSeverityInfo, ""),
*conditions.FalseCondition(anywherev1.WorkersReadyConditon, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.FalseCondition(anywherev1.ReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up control plane nodes, 1 expected (0 actual)"),
},
result: ctrl.Result{Requeue: false, RequeueAfter: 10 * time.Second},
},
{
testName: "cluster not ready, control plane ready",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{},
skipCNIUpgrade: false,
wantConditions: []anywherev1.Condition{
*conditions.FalseCondition(anywherev1.ReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.TrueCondition(anywherev1.ControlPlaneReadyCondition),
*conditions.TrueCondition(anywherev1.DefaultCNIConfiguredCondition),
*conditions.FalseCondition(anywherev1.WorkersReadyConditon, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
},
result: ctrl.Result{Requeue: false, RequeueAfter: 10 * time.Second},
},
{
testName: "cluster not ready, skip upgrades for default cni",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{},
skipCNIUpgrade: true,
wantConditions: []anywherev1.Condition{
*conditions.FalseCondition(anywherev1.ReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.TrueCondition(anywherev1.ControlPlaneReadyCondition),
*conditions.FalseCondition(anywherev1.DefaultCNIConfiguredCondition, anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, clusterv1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades"),
*conditions.FalseCondition(anywherev1.WorkersReadyConditon, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, 1 expected (0 actual)"),
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
},
result: ctrl.Result{Requeue: false, RequeueAfter: 10 * time.Second},
},
{
testName: "cluster ready, skip default cni upgrades",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
},
skipCNIUpgrade: true,
wantConditions: []anywherev1.Condition{
*conditions.TrueCondition(anywherev1.ReadyCondition),
*conditions.TrueCondition(anywherev1.ControlPlaneReadyCondition),
*conditions.FalseCondition(anywherev1.DefaultCNIConfiguredCondition, anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, clusterv1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades"),
*conditions.TrueCondition(anywherev1.WorkersReadyConditon),
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
},
result: ctrl.Result{},
},
{
testName: "cluster ready",
kcpStatus: controlplanev1.KubeadmControlPlaneStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
Conditions: clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: apiv1.ConditionStatus("True"),
},
{
Type: clusterv1.ReadyCondition,
Status: apiv1.ConditionStatus("True"),
},
},
},
machineDeploymentStatus: clusterv1.MachineDeploymentStatus{
ReadyReplicas: 1,
Replicas: 1,
UpdatedReplicas: 1,
},
skipCNIUpgrade: false,
wantConditions: []anywherev1.Condition{
*conditions.TrueCondition(anywherev1.ReadyCondition),
*conditions.TrueCondition(anywherev1.ControlPlaneReadyCondition),
*conditions.TrueCondition(anywherev1.DefaultCNIConfiguredCondition),
*conditions.TrueCondition(anywherev1.WorkersReadyConditon),
*conditions.TrueCondition(anywherev1.ControlPlaneInitializedCondition),
},
result: ctrl.Result{},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
config, bundles := baseTestVsphereCluster()
clusterName := "test-cluster"
config.Cluster.Name = clusterName
config.Cluster.Generation = 2
config.Cluster.Status.ObservedGeneration = 1
config.Cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: clusterName}
config.Cluster.Spec.ClusterNetwork.CNIConfig.Cilium.SkipUpgrade = ptr.Bool(tt.skipCNIUpgrade)
g := NewWithT(t)
objs := make([]runtime.Object, 0, 4+len(config.ChildObjects()))
kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
k := controller.CAPIKubeadmControlPlaneKey(config.Cluster)
kcp.Name = k.Name
kcp.Namespace = k.Namespace
kcp.Status = tt.kcpStatus
})
md1 := test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: config.Cluster.Name,
}
md.Status = tt.machineDeploymentStatus
})
objs = append(objs, config.Cluster, bundles, kcp, md1)
for _, o := range config.ChildObjects() {
objs = append(objs, o)
}
testClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
mockCtrl := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(mockCtrl)
iam := mocks.NewMockAWSIamConfigReconciler(mockCtrl)
clusterValidator := mocks.NewMockClusterValidator(mockCtrl)
registry := newRegistryMock(providerReconciler)
mockPkgs := mocks.NewMockPackagesClient(mockCtrl)
ctx := context.Background()
log := testr.New(t)
logCtx := ctrl.LoggerInto(ctx, log)
iam.EXPECT().EnsureCASecret(logCtx, log, sameName(config.Cluster)).Return(controller.Result{}, nil)
iam.EXPECT().Reconcile(logCtx, log, sameName(config.Cluster)).Return(controller.Result{}, nil)
providerReconciler.EXPECT().ReconcileWorkerNodes(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
r := controllers.NewClusterReconciler(testClient, registry, iam, clusterValidator, mockPkgs)
result, err := r.Reconcile(logCtx, clusterRequest(config.Cluster))
g.Expect(err).ToNot(HaveOccurred())
g.Expect(result).To(Equal(tt.result))
api := envtest.NewAPIExpecter(t, testClient)
c := envtest.CloneNameNamespace(config.Cluster)
api.ShouldEventuallyMatch(logCtx, c, func(g Gomega) {
g.Expect(c.Status.ObservedGeneration).To(
Equal(c.Generation), "status generation should have been updated to the metadata generation's value",
)
})
api.ShouldEventuallyMatch(logCtx, c, func(g Gomega) {
for _, wantCondition := range tt.wantConditions {
condition := conditions.Get(c, wantCondition.Type)
g.Expect(condition).ToNot(BeNil())
g.Expect(condition).To((conditions.HaveSameStateOf(&wantCondition)))
}
})
})
}
}
func TestClusterReconcilerReconcileGenerations(t *testing.T) {
testCases := []struct {
testName string
clusterGeneration int64
childReconciledGeneration int64
reconciledGeneration int64
datacenterGeneration int64
cpMachineConfigGeneration int64
workerMachineConfigGeneration int64
oidcGeneration int64
awsIAMGeneration int64
wantReconciliation bool
wantChildReconciledGeneration int64
}{
{
testName: "matching generation, matching aggregated generation",
clusterGeneration: 2,
reconciledGeneration: 2,
childReconciledGeneration: 12,
datacenterGeneration: 1,
cpMachineConfigGeneration: 2,
workerMachineConfigGeneration: 5,
oidcGeneration: 3,
awsIAMGeneration: 1,
wantReconciliation: false,
wantChildReconciledGeneration: 12,
},
{
testName: "matching generation, non-matching aggregated generation",
clusterGeneration: 2,
reconciledGeneration: 2,
childReconciledGeneration: 10,
datacenterGeneration: 1,
cpMachineConfigGeneration: 2,
workerMachineConfigGeneration: 5,
oidcGeneration: 3,
awsIAMGeneration: 1,
wantReconciliation: true,
wantChildReconciledGeneration: 12,
},
{
testName: "non-matching generation, matching aggregated generation",
clusterGeneration: 3,
reconciledGeneration: 2,
childReconciledGeneration: 12,
datacenterGeneration: 1,
cpMachineConfigGeneration: 2,
workerMachineConfigGeneration: 5,
oidcGeneration: 3,
awsIAMGeneration: 1,
wantReconciliation: true,
wantChildReconciledGeneration: 12,
},
{
testName: "non-matching generation, non-matching aggregated generation",
clusterGeneration: 3,
reconciledGeneration: 2,
childReconciledGeneration: 12,
datacenterGeneration: 1,
cpMachineConfigGeneration: 2,
workerMachineConfigGeneration: 5,
oidcGeneration: 3,
awsIAMGeneration: 3,
wantReconciliation: true,
wantChildReconciledGeneration: 14,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
config, bundles := baseTestVsphereCluster()
config.Cluster.Generation = tt.clusterGeneration
config.Cluster.Status.ObservedGeneration = tt.clusterGeneration
config.Cluster.Status.ReconciledGeneration = tt.reconciledGeneration
config.Cluster.Status.ChildrenReconciledGeneration = tt.childReconciledGeneration
config.VSphereDatacenter.Generation = tt.datacenterGeneration
cpMachine := config.VSphereMachineConfigs[config.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
cpMachine.Generation = tt.cpMachineConfigGeneration
workerMachineConfig := config.VSphereMachineConfigs[config.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name]
workerMachineConfig.Generation = tt.workerMachineConfigGeneration
for _, oidc := range config.OIDCConfigs {
oidc.Generation = tt.oidcGeneration
}
for _, awsIAM := range config.AWSIAMConfigs {
awsIAM.Generation = tt.awsIAMGeneration
}
kcp := testKubeadmControlPlaneFromCluster(config.Cluster)
machineDeployments := machineDeploymentsFromCluster(config.Cluster)
g := NewWithT(t)
ctx := context.Background()
objs := make([]runtime.Object, 0, 7+len(machineDeployments))
objs = append(objs, config.Cluster, bundles)
for _, o := range config.ChildObjects() {
objs = append(objs, o)
}
objs = append(objs, kcp)
for _, obj := range machineDeployments {
objs = append(objs, obj.DeepCopy())
}
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
mockCtrl := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(mockCtrl)
iam := mocks.NewMockAWSIamConfigReconciler(mockCtrl)
clusterValidator := mocks.NewMockClusterValidator(mockCtrl)
registry := newRegistryMock(providerReconciler)
mockPkgs := mocks.NewMockPackagesClient(mockCtrl)
if tt.wantReconciliation {
iam.EXPECT().EnsureCASecret(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(config.Cluster)).Return(controller.Result{}, nil)
iam.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(config.Cluster)).Return(controller.Result{}, nil)
providerReconciler.EXPECT().ReconcileWorkerNodes(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Times(1)
} else {
providerReconciler.EXPECT().ReconcileWorkerNodes(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
}
r := controllers.NewClusterReconciler(client, registry, iam, clusterValidator, mockPkgs)
result, err := r.Reconcile(ctx, clusterRequest(config.Cluster))
g.Expect(err).ToNot(HaveOccurred())
g.Expect(result).To(Equal(ctrl.Result{}))
api := envtest.NewAPIExpecter(t, client)
c := envtest.CloneNameNamespace(config.Cluster)
api.ShouldEventuallyMatch(ctx, c, func(g Gomega) {
g.Expect(c.Status.ReconciledGeneration).To(
Equal(c.Generation), "status generation should have been updated to the metadata generation's value",
)
g.Expect(c.Status.ChildrenReconciledGeneration).To(
Equal(tt.wantChildReconciledGeneration), "status children generation should have been updated to the aggregated generation's value",
)
})
})
}
}
func TestClusterReconcilerReconcileSelfManagedClusterWithExperimentalUpgrades(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
selfManagedCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
kcp := testKubeadmControlPlaneFromCluster(selfManagedCluster)
controller := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(controller)
iam := mocks.NewMockAWSIamConfigReconciler(controller)
clusterValidator := mocks.NewMockClusterValidator(controller)
registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp).Build()
mockPkgs := mocks.NewMockPackagesClient(controller)
providerReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster))
r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, mockPkgs,
controllers.WithExperimentalSelfManagedClusterUpgrades(true),
)
result, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster))
g.Expect(err).ToNot(HaveOccurred())
g.Expect(result).To(Equal(ctrl.Result{}))
}
func TestClusterReconcilerReconcilePausedCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := vsphereCluster()
managementCluster.Name = "management-cluster"
cluster := vsphereCluster()
cluster.SetManagedBy(managementCluster.Name)
capiCluster := newCAPICluster(cluster.Name, cluster.Namespace)
kcp := testKubeadmControlPlaneFromCluster(cluster)
machineDeployments := machineDeploymentsFromCluster(cluster)
objs := make([]runtime.Object, 0, 5)
objs = append(objs, managementCluster, cluster, capiCluster, kcp)
for _, md := range machineDeployments {
objs = append(objs, md.DeepCopy())
}
// Mark as paused
cluster.PauseReconcile()
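// PauseReconcile marks the cluster paused via its annotations; the reconciler
// detects this (see IsReconcilePaused) and returns early, which is why the
// finalizer is expected to be absent below.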
c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
ctrl := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(ctrl)
iam := mocks.NewMockAWSIamConfigReconciler(ctrl)
clusterValidator := mocks.NewMockClusterValidator(ctrl)
registry := newRegistryMock(providerReconciler)
r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil)
g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).To(Equal(reconcile.Result{}))
api := envtest.NewAPIExpecter(t, c)
cl := envtest.CloneNameNamespace(cluster)
api.ShouldEventuallyMatch(ctx, cl, func(g Gomega) {
g.Expect(
controllerutil.ContainsFinalizer(cluster, controllers.ClusterFinalizerName),
).To(BeFalse(), "Cluster should not have the finalizer added")
})
}
func TestClusterReconcilerReconcileDeletedSelfManagedCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
deleteTimestamp := metav1.NewTime(time.Now())
selfManagedCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
DeletionTimestamp: &deleteTimestamp,
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
controller := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(controller)
iam := mocks.NewMockAWSIamConfigReconciler(controller)
clusterValidator := mocks.NewMockClusterValidator(controller)
registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster).Build()
r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil)
_, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster))
g.Expect(err).To(MatchError(ContainSubstring("deleting self-managed clusters is not supported")))
}
func TestClusterReconcilerReconcileSelfManagedClusterRegAuthFailNoSecret(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
selfManagedCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
RegistryMirrorConfiguration: &anywherev1.RegistryMirrorConfiguration{
Authenticate: true,
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
controller := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(controller)
iam := mocks.NewMockAWSIamConfigReconciler(controller)
clusterValidator := mocks.NewMockClusterValidator(controller)
registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster).Build()
r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil)
_, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster))
g.Expect(err).To(MatchError(ContainSubstring("fetching registry auth secret")))
}
func TestClusterReconcilerDeleteExistingCAPIClusterSuccess(t *testing.T) {
secret := createSecret()
managementCluster := vsphereCluster()
managementCluster.Name = "management-cluster"
cluster := vsphereCluster()
cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
now := metav1.Now()
cluster.DeletionTimestamp = &now
datacenterConfig := vsphereDataCenter(cluster)
bundle := createBundle(managementCluster)
machineConfigCP := vsphereCPMachineConfig()
machineConfigWN := vsphereWorkerMachineConfig()
capiCluster := newCAPICluster(cluster.Name, cluster.Namespace)
objs := []runtime.Object{cluster, datacenterConfig, secret, bundle, machineConfigCP, machineConfigWN, managementCluster, capiCluster}
tt := newVsphereClusterReconcilerTest(t, objs...)
req := clusterRequest(cluster)
ctx := context.Background()
_, err := tt.reconciler.Reconcile(ctx, req)
if err != nil {
t.Fatalf("reconcile: (%v)", err)
}
apiCluster := &clusterv1.Cluster{}
err = tt.client.Get(context.TODO(), req.NamespacedName, apiCluster)
if !apierrors.IsNotFound(err) {
t.Fatalf("expected apierrors.IsNotFound but got: (%v)", err)
}
if apiCluster.Status.FailureMessage != nil {
t.Errorf("Expected failure message to be nil. FailureMessage:%s", *apiCluster.Status.FailureMessage)
}
}
func TestClusterReconcilerReconcileDeletePausedCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := vsphereCluster()
managementCluster.Name = "management-cluster"
cluster := vsphereCluster()
cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
controllerutil.AddFinalizer(cluster, controllers.ClusterFinalizerName)
capiCluster := newCAPICluster(cluster.Name, cluster.Namespace)
kcp := testKubeadmControlPlaneFromCluster(cluster)
machineDeployments := machineDeploymentsFromCluster(cluster)
objs := make([]runtime.Object, 0, 5)
objs = append(objs, managementCluster, cluster, capiCluster, kcp)
for _, md := range machineDeployments {
objs = append(objs, md.DeepCopy())
}
// Mark cluster for deletion
now := metav1.Now()
cluster.DeletionTimestamp = &now
// Mark as paused
cluster.PauseReconcile()
controller := gomock.NewController(t)
iam := mocks.NewMockAWSIamConfigReconciler(controller)
clusterValidator := mocks.NewMockClusterValidator(controller)
c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
r := controllers.NewClusterReconciler(c, newRegistryForDummyProviderReconciler(), iam, clusterValidator, nil)
g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).To(Equal(reconcile.Result{}))
api := envtest.NewAPIExpecter(t, c)
cl := envtest.CloneNameNamespace(cluster)
api.ShouldEventuallyMatch(ctx, cl, func(g Gomega) {
g.Expect(
controllerutil.ContainsFinalizer(cluster, controllers.ClusterFinalizerName),
).To(BeTrue(), "Cluster should still have the finalizer")
})
capiCl := envtest.CloneNameNamespace(capiCluster)
api.ShouldEventuallyMatch(ctx, capiCl, func(g Gomega) {
g.Expect(
capiCluster.DeletionTimestamp.IsZero(),
).To(BeTrue(), "CAPI cluster should exist and not be marked for deletion")
})
}
func TestClusterReconcilerReconcileDeleteClusterManagedByCLI(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := vsphereCluster()
managementCluster.Name = "management-cluster"
cluster := vsphereCluster()
cluster.SetManagedBy(managementCluster.Name)
controllerutil.AddFinalizer(cluster, controllers.ClusterFinalizerName)
capiCluster := newCAPICluster(cluster.Name, cluster.Namespace)
// Mark cluster for deletion
now := metav1.Now()
cluster.DeletionTimestamp = &now
// Mark as managed by CLI
cluster.Annotations[anywherev1.ManagedByCLIAnnotation] = "true"
c := fake.NewClientBuilder().WithRuntimeObjects(
managementCluster, cluster, capiCluster,
).Build()
controller := gomock.NewController(t)
iam := mocks.NewMockAWSIamConfigReconciler(controller)
clusterValidator := mocks.NewMockClusterValidator(controller)
r := controllers.NewClusterReconciler(c, newRegistryForDummyProviderReconciler(), iam, clusterValidator, nil)
g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).To(Equal(reconcile.Result{}))
api := envtest.NewAPIExpecter(t, c)
cl := envtest.CloneNameNamespace(cluster)
api.ShouldEventuallyNotExist(ctx, cl)
capiCl := envtest.CloneNameNamespace(capiCluster)
api.ShouldEventuallyMatch(ctx, capiCl, func(g Gomega) {
g.Expect(
capiCluster.DeletionTimestamp.IsZero(),
).To(BeTrue(), "CAPI cluster should exist and not be marked for deletion")
})
}
func TestClusterReconcilerDeleteNoCAPIClusterSuccess(t *testing.T) {
g := NewWithT(t)
secret := createSecret()
managementCluster := vsphereCluster()
managementCluster.Name = "management-cluster"
cluster := vsphereCluster()
cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
now := metav1.Now()
cluster.DeletionTimestamp = &now
datacenterConfig := vsphereDataCenter(cluster)
bundle := createBundle(managementCluster)
machineConfigCP := vsphereCPMachineConfig()
machineConfigWN := vsphereWorkerMachineConfig()
objs := []runtime.Object{cluster, datacenterConfig, secret, bundle, machineConfigCP, machineConfigWN, managementCluster}
g.Expect(cluster.Finalizers).NotTo(ContainElement(controllers.ClusterFinalizerName))
tt := newVsphereClusterReconcilerTest(t, objs...)
req := clusterRequest(cluster)
ctx := context.Background()
controllerutil.AddFinalizer(cluster, controllers.ClusterFinalizerName)
_, err := tt.reconciler.Reconcile(ctx, req)
if err != nil {
t.Fatalf("reconcile: (%v)", err)
}
apiCluster := &anywherev1.Cluster{}
err = tt.client.Get(context.TODO(), req.NamespacedName, apiCluster)
if err != nil {
t.Fatalf("get cluster: (%v)", err)
}
if apiCluster.Status.FailureMessage != nil {
t.Errorf("Expected failure message to be nil. FailureMessage:%s", *apiCluster.Status.FailureMessage)
}
}
func TestClusterReconcilerSkipDontInstallPackagesOnSelfManaged(t *testing.T) {
ctx := context.Background()
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
ManagementCluster: anywherev1.ManagementCluster{
Name: "",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
objs := []runtime.Object{cluster}
cb := fake.NewClientBuilder()
mockClient := cb.WithRuntimeObjects(objs...).Build()
nullRegistry := newRegistryForDummyProviderReconciler()
ctrl := gomock.NewController(t)
mockPkgs := mocks.NewMockPackagesClient(ctrl)
mockPkgs.EXPECT().ReconcileDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
r := controllers.NewClusterReconciler(mockClient, nullRegistry, nil, nil, mockPkgs)
_, err := r.Reconcile(ctx, clusterRequest(cluster))
if err != nil {
t.Fatalf("expected err to be nil, got %s", err)
}
}
func TestClusterReconcilerDontDeletePackagesOnSelfManaged(t *testing.T) {
ctx := context.Background()
deleteTime := metav1.NewTime(time.Now().Add(-1 * time.Second))
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
DeletionTimestamp: &deleteTime,
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
ManagementCluster: anywherev1.ManagementCluster{
Name: "",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
objs := []runtime.Object{cluster}
cb := fake.NewClientBuilder()
mockClient := cb.WithRuntimeObjects(objs...).Build()
nullRegistry := newRegistryForDummyProviderReconciler()
ctrl := gomock.NewController(t)
// At the moment, Reconcile won't get this far, but if deleting self-managed
// clusters through the full cluster lifecycle is ever supported, this
// expectation needs to be revisited.
mockPkgs := mocks.NewMockPackagesClient(ctrl)
mockPkgs.EXPECT().ReconcileDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
r := controllers.NewClusterReconciler(mockClient, nullRegistry, nil, nil, mockPkgs)
_, err := r.Reconcile(ctx, clusterRequest(cluster))
if err == nil || !strings.Contains(err.Error(), "deleting self-managed clusters is not supported") {
t.Fatalf("unexpected error %s", err)
}
}
func TestClusterReconcilerPackagesDeletion(s *testing.T) {
newTestCluster := func() *anywherev1.Cluster {
deleteTime := metav1.NewTime(time.Now().Add(-1 * time.Second))
return &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-workload-cluster",
Namespace: "my-namespace",
DeletionTimestamp: &deleteTime,
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
ManagementCluster: anywherev1.ManagementCluster{
Name: "my-management-cluster",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
}
s.Run("errors when packages client errors", func(t *testing.T) {
ctx := context.Background()
log := testr.New(t)
logCtx := ctrl.LoggerInto(ctx, log)
cluster := newTestCluster()
cluster.Spec.BundlesRef.Name = "non-existent"
ctrl := gomock.NewController(t)
objs := []runtime.Object{cluster}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
nullRegistry := newRegistryForDummyProviderReconciler()
mockPkgs := mocks.NewMockPackagesClient(ctrl)
mockPkgs.EXPECT().ReconcileDelete(logCtx, log, gomock.Any(), gomock.Any()).Return(fmt.Errorf("test error"))
mockIAM := mocks.NewMockAWSIamConfigReconciler(ctrl)
mockValid := mocks.NewMockClusterValidator(ctrl)
r := controllers.NewClusterReconciler(fakeClient, nullRegistry, mockIAM, mockValid, mockPkgs)
_, err := r.Reconcile(logCtx, clusterRequest(cluster))
if err == nil || !strings.Contains(err.Error(), "test error") {
t.Errorf("expected packages client deletion error, got %s", err)
}
})
}
func TestClusterReconcilerPackagesInstall(s *testing.T) {
newTestCluster := func() *anywherev1.Cluster {
return &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-workload-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
ManagementCluster: anywherev1.ManagementCluster{
Name: "my-management-cluster",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
}
s.Run("skips installation when disabled via cluster spec", func(t *testing.T) {
ctx := context.Background()
log := testr.New(t)
logCtx := ctrl.LoggerInto(ctx, log)
cluster := newTestCluster()
cluster.Spec.Packages = &anywherev1.PackageConfiguration{Disable: true}
ctrl := gomock.NewController(t)
bundles := createBundle(cluster)
bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion)
bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name
bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace
secret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: cluster.Name + "-kubeconfig",
},
}
objs := []runtime.Object{cluster, bundles, secret}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
nullRegistry := newRegistryForDummyProviderReconciler()
mockIAM := mocks.NewMockAWSIamConfigReconciler(ctrl)
mockValid := mocks.NewMockClusterValidator(ctrl)
mockValid.EXPECT().ValidateManagementClusterName(logCtx, log, gomock.Any()).Return(nil)
mockPkgs := mocks.NewMockPackagesClient(ctrl)
mockPkgs.EXPECT().
EnableFullLifecycle(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Times(0)
r := controllers.NewClusterReconciler(fakeClient, nullRegistry, mockIAM, mockValid, mockPkgs)
_, err := r.Reconcile(logCtx, clusterRequest(cluster))
if err != nil {
t.Errorf("expected nil error, got %s", err)
}
})
}
func vsphereWorkerMachineConfig() *anywherev1.VSphereMachineConfig {
return &anywherev1.VSphereMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: "VSphereMachineConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name + "-wn",
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: name,
},
},
},
Spec: anywherev1.VSphereMachineConfigSpec{
DiskGiB: 40,
Datastore: "test",
Folder: "test",
NumCPUs: 2,
MemoryMiB: 16,
OSFamily: "ubuntu",
ResourcePool: "test",
StoragePolicyName: "test",
Template: "test",
Users: []anywherev1.UserConfiguration{
{
Name: "user",
SshAuthorizedKeys: []string{"ABC"},
},
},
},
Status: anywherev1.VSphereMachineConfigStatus{},
}
}
func newCAPICluster(name, namespace string) *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: clusterv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
}
}
func vsphereCPMachineConfig() *anywherev1.VSphereMachineConfig {
return &anywherev1.VSphereMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: "VSphereMachineConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name + "-cp",
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: name,
},
},
},
Spec: anywherev1.VSphereMachineConfigSpec{
DiskGiB: 40,
Datastore: "test",
Folder: "test",
NumCPUs: 2,
MemoryMiB: 16,
OSFamily: "ubuntu",
ResourcePool: "test",
StoragePolicyName: "test",
Template: "test",
Users: []anywherev1.UserConfiguration{
{
Name: "user",
SshAuthorizedKeys: []string{"ABC"},
},
},
},
Status: anywherev1.VSphereMachineConfigStatus{},
}
}
func createBundle(cluster *anywherev1.Cluster) *releasev1.Bundles {
return &releasev1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name,
Namespace: "default",
},
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
KubeVersion: "1.20",
EksD: releasev1.EksDRelease{
Name: "test",
EksDReleaseUrl: "testdata/release.yaml",
KubeVersion: "1.20",
},
CertManager: releasev1.CertManagerBundle{},
ClusterAPI: releasev1.CoreClusterAPI{},
Bootstrap: releasev1.KubeadmBootstrapBundle{},
ControlPlane: releasev1.KubeadmControlPlaneBundle{},
VSphere: releasev1.VSphereBundle{},
Docker: releasev1.DockerBundle{},
Eksa: releasev1.EksaBundle{},
Cilium: releasev1.CiliumBundle{},
Kindnetd: releasev1.KindnetdBundle{},
Flux: releasev1.FluxBundle{},
BottleRocketHostContainers: releasev1.BottlerocketHostContainersBundle{},
ExternalEtcdBootstrap: releasev1.EtcdadmBootstrapBundle{},
ExternalEtcdController: releasev1.EtcdadmControllerBundle{},
Tinkerbell: releasev1.TinkerbellBundle{},
},
},
},
}
}
func vsphereDataCenter(cluster *anywherev1.Cluster) *anywherev1.VSphereDatacenterConfig {
return &anywherev1.VSphereDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: "VSphereDatacenterConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "datacenter",
Namespace: cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: cluster.Name,
},
},
},
Spec: anywherev1.VSphereDatacenterConfigSpec{
Thumbprint: "aaa",
Server: "ssss",
Datacenter: "daaa",
Network: "networkA",
},
Status: anywherev1.VSphereDatacenterConfigStatus{
SpecValid: true,
},
}
}
func vsphereCluster() *anywherev1.Cluster {
return &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: anywherev1.ClusterSpec{
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{},
},
},
DatacenterRef: anywherev1.Ref{
Kind: "VSphereDatacenterConfig",
Name: "datacenter",
},
KubernetesVersion: "1.20",
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Kind: "VSphereMachineConfig",
Name: clusterName + "-cp",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: "VSphereMachineConfig",
Name: clusterName + "-wn",
},
Name: "md-0",
Labels: nil,
},
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
}
func createSecret() *apiv1.Secret {
return &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: vsphere.CredentialsObjectName,
},
Data: map[string][]byte{
"username": []byte("test"),
"password": []byte("test"),
},
}
}
type sameNameCluster struct{ c *anywherev1.Cluster }
func sameName(c *anywherev1.Cluster) gomock.Matcher {
return &sameNameCluster{c}
}
func (s *sameNameCluster) Matches(x interface{}) bool {
cluster, ok := x.(*anywherev1.Cluster)
if !ok {
return false
}
return s.c.Name == cluster.Name && s.c.Namespace == cluster.Namespace
}
func (s *sameNameCluster) String() string {
return fmt.Sprintf("has name %s and namespace %s", s.c.Name, s.c.Namespace)
}
func baseTestVsphereCluster() (*cluster.Config, *releasev1.Bundles) {
config := &cluster.Config{
VSphereMachineConfigs: map[string]*anywherev1.VSphereMachineConfig{},
OIDCConfigs: map[string]*anywherev1.OIDCConfig{},
AWSIAMConfigs: map[string]*anywherev1.AWSIamConfig{},
}
config.Cluster = vsphereCluster()
config.VSphereDatacenter = vsphereDataCenter(config.Cluster)
machineConfigCP := vsphereCPMachineConfig()
machineConfigWorker := vsphereWorkerMachineConfig()
config.VSphereMachineConfigs[machineConfigCP.Name] = machineConfigCP
config.VSphereMachineConfigs[machineConfigWorker.Name] = machineConfigWorker
config.Cluster.Spec.IdentityProviderRefs = []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "my-oidc",
},
{
Kind: anywherev1.AWSIamConfigKind,
Name: "my-iam",
},
}
oidc := &anywherev1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-oidc",
Namespace: config.Cluster.Namespace,
},
}
awsIAM := &anywherev1.AWSIamConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-iam",
Namespace: config.Cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: config.Cluster.Name,
},
},
},
}
config.AWSIAMConfigs[awsIAM.Name] = awsIAM
config.OIDCConfigs[oidc.Name] = oidc
bundles := &releasev1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: "my-bundles-ref",
Namespace: config.Cluster.Namespace,
},
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
KubeVersion: "v1.25",
PackageController: releasev1.PackageBundle{
HelmChart: releasev1.Image{},
},
},
},
},
}
config.Cluster.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: bundles.Name,
Namespace: bundles.Namespace,
}
return config, bundles
}
| 1,578 |
eks-anywhere | aws | Go | package controllers_test
import (
"context"
"errors"
"testing"
"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/controllers"
"github.com/aws/eks-anywhere/controllers/mocks"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
cluster.Spec.IdentityProviderRefs = []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "my-oidc",
},
{
Kind: anywherev1.AWSIamConfigKind,
Name: "my-iam",
},
}
cluster.SetManagedBy("my-management-cluster")
oidc := &anywherev1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-oidc",
Namespace: cluster.Namespace,
},
}
awsIAM := &anywherev1.AWSIamConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-iam",
Namespace: cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: cluster.Name,
},
},
},
}
bundles := &v1alpha1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: "my-bundles-ref",
Namespace: cluster.Namespace,
},
Spec: v1alpha1.BundlesSpec{
VersionsBundles: []v1alpha1.VersionsBundle{
{
KubeVersion: "v1.25",
PackageController: v1alpha1.PackageBundle{
HelmChart: v1alpha1.Image{},
},
},
},
},
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-kubeconfig",
Namespace: constants.EksaSystemNamespace,
},
}
objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM, bundles, secret}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
iam := newMockAWSIamConfigReconciler(t)
iam.EXPECT().EnsureCASecret(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).Return(controller.Result{}, nil)
iam.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).Return(controller.Result{}, nil)
validator := newMockClusterValidator(t)
validator.EXPECT().ValidateManagementClusterName(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).Return(nil)
pcc := newMockPackagesClient(t)
pcc.EXPECT().Reconcile(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), iam, validator, pcc)
_, err := r.Reconcile(ctx, clusterRequest(cluster))
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Spec.BundlesRef.Namespace, Name: cluster.Spec.BundlesRef.Name}, bundles)).To(Succeed())
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: constants.EksaSystemNamespace, Name: cluster.Name + "-kubeconfig"}, secret)).To(Succeed())
g.Expect(err).NotTo(HaveOccurred())
newOidc := &anywherev1.OIDCConfig{}
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: "my-oidc"}, newOidc)).To(Succeed())
g.Expect(newOidc.OwnerReferences).To(HaveLen(1))
g.Expect(newOidc.OwnerReferences[0].Name).To(Equal(cluster.Name))
newAWSIam := &anywherev1.AWSIamConfig{}
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: "my-iam"}, newAWSIam)).To(Succeed())
g.Expect(newAWSIam.OwnerReferences).To(HaveLen(1))
g.Expect(newAWSIam.OwnerReferences[0]).To(Equal(awsIAM.OwnerReferences[0]))
}
func TestClusterReconcilerReconcileChildObjectNotFound(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
Namespace: "my-namespace",
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
},
}
cluster.Spec.IdentityProviderRefs = []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "my-oidc",
},
{
Kind: anywherev1.AWSIamConfigKind,
Name: "my-iam",
},
}
cluster.SetManagedBy("my-management-cluster")
objs := []runtime.Object{cluster, managementCluster}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
api := envtest.NewAPIExpecter(t, cl)
r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t), nil)
g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).Error().To(MatchError(ContainSubstring("not found")))
c := envtest.CloneNameNamespace(cluster)
api.ShouldEventuallyMatch(ctx, c, func(g Gomega) {
g.Expect(c.Status.FailureMessage).To(HaveValue(Equal(
"Dependent cluster objects don't exist: oidcconfigs.anywhere.eks.amazonaws.com \"my-oidc\" not found",
)))
})
}
func TestClusterReconcilerSetupWithManager(t *testing.T) {
client := env.Client()
r := controllers.NewClusterReconciler(client, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t), nil)
g := NewWithT(t)
g.Expect(r.SetupWithManager(env.Manager(), env.Manager().GetLogger())).To(Succeed())
}
func TestClusterReconcilerManagementClusterNotFound(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
},
}
cluster.SetManagedBy("my-management-cluster")
objs := []runtime.Object{cluster, managementCluster}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
api := envtest.NewAPIExpecter(t, cl)
r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t), nil)
g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).Error().To(MatchError(ContainSubstring("\"my-management-cluster\" not found")))
c := envtest.CloneNameNamespace(cluster)
api.ShouldEventuallyMatch(ctx, c, func(g Gomega) {
g.Expect(c.Status.FailureMessage).To(HaveValue(Equal("Management cluster my-management-cluster does not exist")))
})
}
func TestClusterReconcilerSetBundlesRef(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
cluster.SetManagedBy("my-management-cluster")
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-kubeconfig",
Namespace: constants.EksaSystemNamespace,
},
}
bundles := &v1alpha1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: "my-bundles-ref",
Namespace: cluster.Spec.BundlesRef.Namespace,
},
Spec: v1alpha1.BundlesSpec{
VersionsBundles: []v1alpha1.VersionsBundle{
{
KubeVersion: "v1.25",
PackageController: v1alpha1.PackageBundle{
HelmChart: v1alpha1.Image{},
},
},
},
},
}
objs := []runtime.Object{cluster, managementCluster, secret, bundles}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
mgmtCluster := &anywherev1.Cluster{}
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: managementCluster.Name}, mgmtCluster)).To(Succeed())
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Spec.BundlesRef.Namespace, Name: cluster.Spec.BundlesRef.Name}, bundles)).To(Succeed())
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: constants.EksaSystemNamespace, Name: cluster.Name + "-kubeconfig"}, secret)).To(Succeed())
pcc := newMockPackagesClient(t)
pcc.EXPECT().Reconcile(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
validator := newMockClusterValidator(t)
validator.EXPECT().ValidateManagementClusterName(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).Return(nil)
r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), validator, pcc)
_, err := r.Reconcile(ctx, clusterRequest(cluster))
g.Expect(err).ToNot(HaveOccurred())
newCluster := &anywherev1.Cluster{}
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: "my-cluster"}, newCluster)).To(Succeed())
g.Expect(newCluster.Spec.BundlesRef).To(Equal(mgmtCluster.Spec.BundlesRef))
}
func TestClusterReconcilerWorkloadClusterMgmtClusterNameFail(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
Namespace: "my-namespace",
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
},
}
cluster.SetManagedBy("my-management-cluster")
objs := []runtime.Object{cluster, managementCluster}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
validator := newMockClusterValidator(t)
validator.EXPECT().ValidateManagementClusterName(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).
Return(errors.New("test error"))
r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), validator, nil)
_, err := r.Reconcile(ctx, clusterRequest(cluster))
g.Expect(err).To(HaveOccurred())
}
func newRegistryForDummyProviderReconciler() controllers.ProviderClusterReconcilerRegistry {
return newRegistryMock(dummyProviderReconciler{})
}
func newRegistryMock(reconciler clusters.ProviderClusterReconciler) dummyProviderReconcilerRegistry {
return dummyProviderReconcilerRegistry{
reconciler: reconciler,
}
}
type dummyProviderReconcilerRegistry struct {
reconciler clusters.ProviderClusterReconciler
}
func (d dummyProviderReconcilerRegistry) Get(_ string) clusters.ProviderClusterReconciler {
return d.reconciler
}
type dummyProviderReconciler struct{}
func (dummyProviderReconciler) Reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
return controller.Result{}, nil
}
func (dummyProviderReconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
return controller.Result{}, nil
}
func (dummyProviderReconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
return controller.Result{}, nil
}
func clusterRequest(cluster *anywherev1.Cluster) reconcile.Request {
return reconcile.Request{
NamespacedName: types.NamespacedName{
Name: cluster.Name,
Namespace: cluster.Namespace,
},
}
}
func nullLog() logr.Logger {
return logr.New(logf.NullLogSink{})
}
func newMockAWSIamConfigReconciler(t *testing.T) *mocks.MockAWSIamConfigReconciler {
ctrl := gomock.NewController(t)
return mocks.NewMockAWSIamConfigReconciler(ctrl)
}
func newMockClusterValidator(t *testing.T) *mocks.MockClusterValidator {
ctrl := gomock.NewController(t)
return mocks.NewMockClusterValidator(ctrl)
}
func newMockPackagesClient(t *testing.T) *mocks.MockPackagesClient {
ctrl := gomock.NewController(t)
return mocks.NewMockPackagesClient(ctrl)
}
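// Illustrative note (sketch, not part of the upstream tests):
// gomock.AssignableToTypeOf, used throughout this file, matches any argument
// whose type is assignable to the given value's type, so an expectation like
// the following accepts any logger and any *anywherev1.Cluster:
//
// iam.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(&anywherev1.Cluster{})).Return(controller.Result{}, nil)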
| 413 |
eks-anywhere | aws | Go | package controllers
import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
// DockerDatacenterReconciler reconciles a DockerDatacenterConfig object.
type DockerDatacenterReconciler struct {
client client.Client
}
// NewDockerDatacenterReconciler creates a new instance of the DockerDatacenterReconciler struct.
func NewDockerDatacenterReconciler(client client.Client) *DockerDatacenterReconciler {
return &DockerDatacenterReconciler{
client: client,
}
}
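// Illustrative sketch (hypothetical; not present in this file): the sibling
// datacenter reconcilers register themselves with the manager, and a Docker
// equivalent would look like:
//
// func (r *DockerDatacenterReconciler) SetupWithManager(mgr ctrl.Manager) error {
// return ctrl.NewControllerManagedBy(mgr).
// For(&anywherev1.DockerDatacenterConfig{}).
// Complete(r)
// }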
| 18 |
eks-anywhere | aws | Go | package controllers
import (
"context"
"github.com/go-logr/logr"
"github.com/google/uuid"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/controller-runtime/pkg/manager"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
awsiamconfigreconciler "github.com/aws/eks-anywhere/pkg/awsiamauth/reconciler"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/executables/cmk"
ciliumreconciler "github.com/aws/eks-anywhere/pkg/networking/cilium/reconciler"
cnireconciler "github.com/aws/eks-anywhere/pkg/networking/reconciler"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
cloudstackreconciler "github.com/aws/eks-anywhere/pkg/providers/cloudstack/reconciler"
dockerreconciler "github.com/aws/eks-anywhere/pkg/providers/docker/reconciler"
nutanixreconciler "github.com/aws/eks-anywhere/pkg/providers/nutanix/reconciler"
"github.com/aws/eks-anywhere/pkg/providers/snow"
snowreconciler "github.com/aws/eks-anywhere/pkg/providers/snow/reconciler"
tinkerbellreconciler "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/reconciler"
vspherereconciler "github.com/aws/eks-anywhere/pkg/providers/vsphere/reconciler"
)
type Manager = manager.Manager
type Factory struct {
buildSteps []buildStep
dependencyFactory *dependencies.Factory
manager Manager
registryBuilder *clusters.ProviderClusterReconcilerRegistryBuilder
reconcilers Reconcilers
tracker *remote.ClusterCacheTracker
registry *clusters.ProviderClusterReconcilerRegistry
dockerClusterReconciler *dockerreconciler.Reconciler
vsphereClusterReconciler *vspherereconciler.Reconciler
tinkerbellClusterReconciler *tinkerbellreconciler.Reconciler
snowClusterReconciler *snowreconciler.Reconciler
cloudstackClusterReconciler *cloudstackreconciler.Reconciler
nutanixClusterReconciler *nutanixreconciler.Reconciler
cniReconciler *cnireconciler.Reconciler
ipValidator *clusters.IPValidator
awsIamConfigReconciler *awsiamconfigreconciler.Reconciler
logger logr.Logger
deps *dependencies.Dependencies
packageControllerClient *curatedpackages.PackageControllerClient
cloudStackValidatorRegistry cloudstack.ValidatorRegistry
}
type Reconcilers struct {
ClusterReconciler *ClusterReconciler
DockerDatacenterReconciler *DockerDatacenterReconciler
VSphereDatacenterReconciler *VSphereDatacenterReconciler
SnowMachineConfigReconciler *SnowMachineConfigReconciler
TinkerbellDatacenterReconciler *TinkerbellDatacenterReconciler
CloudStackDatacenterReconciler *CloudStackDatacenterReconciler
NutanixDatacenterReconciler *NutanixDatacenterReconciler
}
type buildStep func(ctx context.Context) error
func NewFactory(logger logr.Logger, manager Manager) *Factory {
return &Factory{
buildSteps: make([]buildStep, 0),
dependencyFactory: dependencies.NewFactory().WithLocalExecutables(),
manager: manager,
logger: logger,
}
}
func (f *Factory) Build(ctx context.Context) (*Reconcilers, error) {
deps, err := f.dependencyFactory.Build(ctx)
if err != nil {
return nil, err
}
f.deps = deps
for _, step := range f.buildSteps {
if err := step(ctx); err != nil {
return nil, err
}
}
f.buildSteps = make([]buildStep, 0)
return &f.reconcilers, nil
}
// Close cleans up any open resources from the created dependencies.
func (f *Factory) Close(ctx context.Context) error {
return f.deps.Close(ctx)
}
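// Illustrative usage sketch (hypothetical wiring, not part of the upstream
// factory): callers chain the With* builders, call Build once, and defer or
// call Close so executable dependencies are cleaned up, mirroring the
// factory tests.
func exampleFactoryWiring(ctx context.Context, logger logr.Logger, mgr Manager) error {
f := NewFactory(logger, mgr).
WithDockerDatacenterReconciler().
WithVSphereDatacenterReconciler()
reconcilers, err := f.Build(ctx)
if err != nil {
return err
}
_ = reconcilers // the reconcilers would be registered with the manager here
return f.Close(ctx)
}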
// WithClusterReconciler builds the cluster reconciler.
func (f *Factory) WithClusterReconciler(capiProviders []clusterctlv1.Provider, opts ...ClusterReconcilerOption) *Factory {
f.dependencyFactory.WithGovc()
f.withTracker().
WithProviderClusterReconcilerRegistry(capiProviders).
withAWSIamConfigReconciler().
withPackageControllerClient()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.ClusterReconciler != nil {
return nil
}
f.reconcilers.ClusterReconciler = NewClusterReconciler(
f.manager.GetClient(),
f.registry,
f.awsIamConfigReconciler,
clusters.NewClusterValidator(f.manager.GetClient()),
f.packageControllerClient,
opts...,
)
return nil
})
return f
}
// WithDockerDatacenterReconciler adds the DockerDatacenterReconciler to the controller factory.
func (f *Factory) WithDockerDatacenterReconciler() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.DockerDatacenterReconciler != nil {
return nil
}
f.reconcilers.DockerDatacenterReconciler = NewDockerDatacenterReconciler(
f.manager.GetClient(),
)
return nil
})
return f
}
func (f *Factory) WithVSphereDatacenterReconciler() *Factory {
f.dependencyFactory.WithVSphereDefaulter().WithVSphereValidator()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.VSphereDatacenterReconciler != nil {
return nil
}
f.reconcilers.VSphereDatacenterReconciler = NewVSphereDatacenterReconciler(
f.manager.GetClient(),
f.deps.VSphereValidator,
f.deps.VSphereDefaulter,
)
return nil
})
return f
}
func (f *Factory) WithSnowMachineConfigReconciler() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.SnowMachineConfigReconciler != nil {
return nil
}
client := f.manager.GetClient()
f.reconcilers.SnowMachineConfigReconciler = NewSnowMachineConfigReconciler(
client,
snow.NewValidator(snowreconciler.NewAwsClientBuilder(client)),
)
return nil
})
return f
}
// WithTinkerbellDatacenterReconciler adds the TinkerbellDatacenterReconciler to the controller factory.
func (f *Factory) WithTinkerbellDatacenterReconciler() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.TinkerbellDatacenterReconciler != nil {
return nil
}
f.reconcilers.TinkerbellDatacenterReconciler = NewTinkerbellDatacenterReconciler(
f.manager.GetClient(),
)
return nil
})
return f
}
// WithCloudStackDatacenterReconciler adds the CloudStackDatacenterReconciler to the controller factory.
func (f *Factory) WithCloudStackDatacenterReconciler() *Factory {
f.withCloudStackValidatorRegistry()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.CloudStackDatacenterReconciler != nil {
return nil
}
f.reconcilers.CloudStackDatacenterReconciler = NewCloudStackDatacenterReconciler(
f.manager.GetClient(),
f.cloudStackValidatorRegistry,
)
return nil
})
return f
}
// WithNutanixDatacenterReconciler adds the NutanixDatacenterReconciler to the controller factory.
func (f *Factory) WithNutanixDatacenterReconciler() *Factory {
f.dependencyFactory.WithNutanixDefaulter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.reconcilers.NutanixDatacenterReconciler != nil {
return nil
}
f.reconcilers.NutanixDatacenterReconciler = NewNutanixDatacenterReconciler(
f.manager.GetClient(),
f.deps.NutanixDefaulter,
)
return nil
})
return f
}
// withNutanixClusterReconciler adds the NutanixClusterReconciler to the controller factory.
func (f *Factory) withNutanixClusterReconciler() *Factory {
f.dependencyFactory.WithNutanixDefaulter().WithNutanixValidator()
f.withTracker().withCNIReconciler().withIPValidator()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.nutanixClusterReconciler != nil {
return nil
}
f.nutanixClusterReconciler = nutanixreconciler.New(
f.manager.GetClient(),
f.deps.NutanixValidator,
f.cniReconciler,
f.tracker,
f.ipValidator,
)
f.registryBuilder.Add(anywherev1.NutanixDatacenterKind, f.nutanixClusterReconciler)
return nil
})
return f
}
func (f *Factory) withTracker() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.tracker != nil {
return nil
}
logger := f.logger.WithName("remote").WithName("ClusterCacheTracker")
tracker, err := remote.NewClusterCacheTracker(
f.manager,
remote.ClusterCacheTrackerOptions{
Log: &logger,
Indexes: remote.DefaultIndexes,
},
)
if err != nil {
return err
}
f.tracker = tracker
return nil
})
return f
}
const (
dockerProviderName = "docker"
snowProviderName = "snow"
vSphereProviderName = "vsphere"
tinkerbellProviderName = "tinkerbell"
cloudstackProviderName = "cloudstack"
nutanixProviderName = "nutanix"
)
func (f *Factory) WithProviderClusterReconcilerRegistry(capiProviders []clusterctlv1.Provider) *Factory {
f.registryBuilder = clusters.NewProviderClusterReconcilerRegistryBuilder()
for _, p := range capiProviders {
if p.Type != string(clusterctlv1.InfrastructureProviderType) {
continue
}
switch p.ProviderName {
case dockerProviderName:
f.withDockerClusterReconciler()
case snowProviderName:
f.withSnowClusterReconciler()
case vSphereProviderName:
f.withVSphereClusterReconciler()
case tinkerbellProviderName:
f.withTinkerbellClusterReconciler()
case cloudstackProviderName:
f.withCloudStackClusterReconciler()
case nutanixProviderName:
f.withNutanixClusterReconciler()
default:
f.logger.Info("Found unknown CAPI provider, ignoring", "providerName", p.ProviderName)
}
}
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.registry != nil {
return nil
}
r := f.registryBuilder.Build()
f.registry = &r
return nil
})
return f
}
func (f *Factory) withDockerClusterReconciler() *Factory {
f.withCNIReconciler().withTracker()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dockerClusterReconciler != nil {
return nil
}
f.dockerClusterReconciler = dockerreconciler.New(
f.manager.GetClient(),
f.cniReconciler,
f.tracker,
)
f.registryBuilder.Add(anywherev1.DockerDatacenterKind, f.dockerClusterReconciler)
return nil
})
return f
}
func (f *Factory) withVSphereClusterReconciler() *Factory {
f.dependencyFactory.WithVSphereDefaulter().WithVSphereValidator()
f.withTracker().withCNIReconciler().withIPValidator()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.vsphereClusterReconciler != nil {
return nil
}
f.vsphereClusterReconciler = vspherereconciler.New(
f.manager.GetClient(),
f.deps.VSphereValidator,
f.deps.VSphereDefaulter,
f.cniReconciler,
f.tracker,
f.ipValidator,
)
f.registryBuilder.Add(anywherev1.VSphereDatacenterKind, f.vsphereClusterReconciler)
return nil
})
return f
}
func (f *Factory) withSnowClusterReconciler() *Factory {
f.withCNIReconciler().withTracker().withIPValidator()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.snowClusterReconciler != nil {
return nil
}
f.snowClusterReconciler = snowreconciler.New(
f.manager.GetClient(),
f.cniReconciler,
f.tracker,
f.ipValidator,
)
f.registryBuilder.Add(anywherev1.SnowDatacenterKind, f.snowClusterReconciler)
return nil
})
return f
}
func (f *Factory) withTinkerbellClusterReconciler() *Factory {
f.withCNIReconciler().withTracker().withIPValidator()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.tinkerbellClusterReconciler != nil {
return nil
}
f.tinkerbellClusterReconciler = tinkerbellreconciler.New(
f.manager.GetClient(),
f.cniReconciler,
f.tracker,
f.ipValidator,
)
f.registryBuilder.Add(anywherev1.TinkerbellDatacenterKind, f.tinkerbellClusterReconciler)
return nil
})
return f
}
func (f *Factory) withCloudStackClusterReconciler() *Factory {
f.withCNIReconciler().withTracker().withIPValidator().withCloudStackValidatorRegistry()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.cloudstackClusterReconciler != nil {
return nil
}
f.cloudstackClusterReconciler = cloudstackreconciler.New(
f.manager.GetClient(),
f.ipValidator,
f.cniReconciler,
f.tracker,
f.cloudStackValidatorRegistry,
)
f.registryBuilder.Add(anywherev1.CloudStackDatacenterKind, f.cloudstackClusterReconciler)
return nil
})
return f
}
func (f *Factory) withCloudStackValidatorRegistry() *Factory {
f.dependencyFactory.WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.cloudStackValidatorRegistry != nil {
return nil
}
cmkBuilder := cmk.NewCmkBuilder(executables.NewLocalExecutablesBuilder())
f.cloudStackValidatorRegistry = cloudstack.NewValidatorFactory(cmkBuilder, f.deps.Writer, false)
return nil
})
return f
}
func (f *Factory) withCNIReconciler() *Factory {
f.dependencyFactory.WithCiliumTemplater()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.cniReconciler != nil {
return nil
}
f.cniReconciler = cnireconciler.New(ciliumreconciler.New(f.deps.CiliumTemplater))
return nil
})
return f
}
func (f *Factory) withIPValidator() *Factory {
f.dependencyFactory.WithIPValidator()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.ipValidator != nil {
return nil
}
f.ipValidator = clusters.NewIPValidator(f.deps.IPValidator, f.manager.GetClient())
return nil
})
return f
}
func (f *Factory) withAWSIamConfigReconciler() *Factory {
f.withTracker()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.awsIamConfigReconciler != nil {
return nil
}
certgen := crypto.NewCertificateGenerator()
generateUUID := uuid.New
f.awsIamConfigReconciler = awsiamconfigreconciler.New(
certgen,
generateUUID,
f.manager.GetClient(),
f.tracker,
)
return nil
})
return f
}
func (f *Factory) withPackageControllerClient() *Factory {
f.dependencyFactory.WithHelm().WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.packageControllerClient != nil {
return nil
}
f.packageControllerClient = curatedpackages.NewPackageControllerClientFullLifecycle(f.logger, f.deps.Helm, f.deps.Kubectl, f.tracker)
return nil
})
return f
}
| 528 |
eks-anywhere | aws | Go | package controllers_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"github.com/aws/eks-anywhere/controllers"
"github.com/aws/eks-anywhere/controllers/mocks"
)
func TestFactoryBuildAllVSphereReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithVSphereDatacenterReconciler()
// testing idempotence
f.WithVSphereDatacenterReconciler()
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.VSphereDatacenterReconciler).NotTo(BeNil())
}
func TestFactoryBuildAllDockerReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithDockerDatacenterReconciler()
// testing idempotence
f.WithDockerDatacenterReconciler()
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.DockerDatacenterReconciler).NotTo(BeNil())
}
func TestFactoryBuildAllTinkerbellReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithTinkerbellDatacenterReconciler()
// testing idempotence
f.WithTinkerbellDatacenterReconciler()
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.TinkerbellDatacenterReconciler).NotTo(BeNil())
}
func TestFactoryBuildAllCloudStackReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithCloudStackDatacenterReconciler()
// testing idempotence
f.WithCloudStackDatacenterReconciler()
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.CloudStackDatacenterReconciler).NotTo(BeNil())
}
func TestFactoryBuildAllNutanixReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithNutanixDatacenterReconciler().
WithClusterReconciler([]clusterctlv1.Provider{
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "nutanix",
},
})
// testing idempotence
f.WithNutanixDatacenterReconciler().
WithClusterReconciler([]clusterctlv1.Provider{
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "nutanix",
},
})
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.NutanixDatacenterReconciler).NotTo(BeNil())
}
func TestFactoryBuildClusterReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
providers := []clusterctlv1.Provider{
{
Type: string(clusterctlv1.ControlPlaneProviderType),
ProviderName: "kubeadm",
},
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "docker",
},
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "vsphere",
},
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "snow",
},
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "tinkerbell",
},
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "cloudstack",
},
{
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "unknown-provider",
},
}
f := controllers.NewFactory(logger, manager).
WithClusterReconciler(providers)
// testing idempotence
f.WithClusterReconciler(providers)
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.ClusterReconciler).NotTo(BeNil())
}
func TestFactoryBuildAllSnowReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithSnowMachineConfigReconciler()
// testing idempotence
f.WithSnowMachineConfigReconciler()
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.SnowMachineConfigReconciler).NotTo(BeNil())
}
func TestFactoryClose(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager)
_, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(f.Close(ctx)).To(Succeed())
}
func TestFactoryWithNutanixDatacenterReconciler(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
logger := nullLog()
ctrl := gomock.NewController(t)
manager := mocks.NewMockManager(ctrl)
manager.EXPECT().GetClient().AnyTimes()
manager.EXPECT().GetScheme().AnyTimes()
f := controllers.NewFactory(logger, manager).
WithNutanixDatacenterReconciler()
// testing idempotence
f.WithNutanixDatacenterReconciler()
reconcilers, err := f.Build(ctx)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(reconcilers.NutanixDatacenterReconciler).NotTo(BeNil())
}
| 232 |
eks-anywhere | aws | Go | package controllers_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 15 |
eks-anywhere | aws | Go | package controllers
import (
"context"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/nutanix"
)
// NutanixDatacenterReconciler reconciles a NutanixDatacenterConfig object.
type NutanixDatacenterReconciler struct {
client client.Client
defaulter *nutanix.Defaulter
}
// Reconcile reconciles a NutanixDatacenterConfig object.
func (r *NutanixDatacenterReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
dc := &anywherev1.NutanixDatacenterConfig{}
if err := r.client.Get(ctx, request.NamespacedName, dc); err != nil {
return ctrl.Result{}, err
}
r.defaulter.SetDefaultsForDatacenterConfig(*dc)
if !dc.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, dc)
}
return ctrl.Result{}, nil
}
func (r *NutanixDatacenterReconciler) reconcileDelete(ctx context.Context, dc *anywherev1.NutanixDatacenterConfig) (ctrl.Result, error) {
return ctrl.Result{}, nil
}
// NewNutanixDatacenterReconciler constructs a new NutanixDatacenterReconciler.
func NewNutanixDatacenterReconciler(client client.Client, defaulter *nutanix.Defaulter) *NutanixDatacenterReconciler {
return &NutanixDatacenterReconciler{
client: client,
defaulter: defaulter,
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *NutanixDatacenterReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&anywherev1.NutanixDatacenterConfig{}).
Complete(r)
}
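// Illustrative sketch (hypothetical): wiring this reconciler into a
// controller-runtime manager, mirroring what the envtest suite does.
func exampleNutanixDatacenterSetup(mgr ctrl.Manager) error {
r := NewNutanixDatacenterReconciler(mgr.GetClient(), nutanix.NewDefaulter())
return r.SetupWithManager(mgr)
}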
| 53 |
eks-anywhere | aws | Go | package controllers_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/controllers"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/nutanix"
)
func TestNutanixDatacenterConfigReconcilerSetupWithManager(t *testing.T) {
client := env.Client()
r := controllers.NewNutanixDatacenterReconciler(client, nutanix.NewDefaulter())
g := NewWithT(t)
g.Expect(r.SetupWithManager(env.Manager())).To(Succeed())
}
func TestNutanixDatacenterConfigReconcilerSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
config := nutanixDatacenterConfig()
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewNutanixDatacenterReconciler(cl, nutanix.NewDefaulter())
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "nutanix-datacenter-config",
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
ndc := &anywherev1.NutanixDatacenterConfig{}
err = cl.Get(ctx, req.NamespacedName, ndc)
g.Expect(err).NotTo(HaveOccurred())
}
func TestNutanixDatacenterConfigReconcileDelete(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
config := nutanixDatacenterConfig()
now := metav1.Now()
config.DeletionTimestamp = &now
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewNutanixDatacenterReconciler(cl, nutanix.NewDefaulter())
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "nutanix-datacenter-config",
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
}
func TestNutanixDatacenterConfigReconcilerFailure(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects().Build()
r := controllers.NewNutanixDatacenterReconciler(cl, nutanix.NewDefaulter())
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "nutanix-datacenter-config",
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(HaveOccurred())
}
func nutanixDatacenterConfig() *anywherev1.NutanixDatacenterConfig {
return &anywherev1.NutanixDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "nutanix-datacenter-config",
},
Spec: anywherev1.NutanixDatacenterConfigSpec{
Endpoint: "prism.nutanix.com",
Port: 9440,
},
}
}
| 104 |
eks-anywhere | aws | Go | package controllers
import (
"context"
"fmt"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
type Validator interface {
ValidateEC2SshKeyNameExists(ctx context.Context, m *anywherev1.SnowMachineConfig) error
ValidateEC2ImageExistsOnDevice(ctx context.Context, m *anywherev1.SnowMachineConfig) error
}
// SnowMachineConfigReconciler reconciles a SnowMachineConfig object.
type SnowMachineConfigReconciler struct {
client client.Client
validator Validator
}
// NewSnowMachineConfigReconciler constructs a new SnowMachineConfigReconciler.
func NewSnowMachineConfigReconciler(client client.Client, validator Validator) *SnowMachineConfigReconciler {
return &SnowMachineConfigReconciler{
client: client,
validator: validator,
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *SnowMachineConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&anywherev1.SnowMachineConfig{}).
Complete(r)
}
// TODO: add here kubebuilder permissions as needed.
// Reconcile implements the reconcile.Reconciler interface.
func (r *SnowMachineConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
log := ctrl.LoggerFrom(ctx)
// Fetch the SnowMachineConfig object
snowMachineConfig := &anywherev1.SnowMachineConfig{}
log.Info("Reconciling snowmachineconfig")
if err := r.client.Get(ctx, req.NamespacedName, snowMachineConfig); err != nil {
return ctrl.Result{}, err
}
// Initialize the patch helper
patchHelper, err := patch.NewHelper(snowMachineConfig, r.client)
if err != nil {
return ctrl.Result{}, err
}
defer func() {
// Always attempt to patch the object and status after each reconciliation.
patchOpts := []patch.Option{}
if err := patchHelper.Patch(ctx, snowMachineConfig, patchOpts...); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("patching snowmachineconfig: %v", err)})
}
}()
// There's no need to go any further if the SnowMachineConfig is marked for deletion.
if !snowMachineConfig.DeletionTimestamp.IsZero() {
return ctrl.Result{}, reterr
}
result, err := r.reconcile(ctx, snowMachineConfig)
if err != nil {
reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("reconciling snowmachineconfig: %v", err)})
}
return result, reterr
}
func (r *SnowMachineConfigReconciler) reconcile(ctx context.Context, snowMachineConfig *anywherev1.SnowMachineConfig) (_ ctrl.Result, reterr error) {
var allErrs []error
if err := r.validator.ValidateEC2ImageExistsOnDevice(ctx, snowMachineConfig); err != nil {
allErrs = append(allErrs, err)
}
if err := r.validator.ValidateEC2SshKeyNameExists(ctx, snowMachineConfig); err != nil {
allErrs = append(allErrs, err)
}
if len(allErrs) > 0 {
snowMachineConfig.Status.SpecValid = false
aggregate := kerrors.NewAggregate(allErrs)
failureMessage := aggregate.Error()
snowMachineConfig.Status.FailureMessage = &failureMessage
return ctrl.Result{}, aggregate
}
snowMachineConfig.Status.SpecValid = true
snowMachineConfig.Status.FailureMessage = nil
return ctrl.Result{}, nil
}
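// Illustrative sketch (hypothetical helper, not used by the controller):
// kerrors.NewAggregate collapses several errors into one whose message joins
// the parts as "[err1, err2]", which is the shape reconcile stores in
// Status.FailureMessage above.
func exampleAggregateMessage(errs []error) string {
if agg := kerrors.NewAggregate(errs); agg != nil {
return agg.Error()
}
return ""
}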
| 99 |
eks-anywhere | aws | Go | package controllers_test
import (
"context"
"errors"
"fmt"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/controllers"
"github.com/aws/eks-anywhere/controllers/mocks"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
var (
name = "test-cluster"
namespace = "eksa-system"
)
func TestSnowMachineConfigReconcilerSetupWithManager(t *testing.T) {
client := env.Client()
r := controllers.NewSnowMachineConfigReconciler(client, nil)
g := NewWithT(t)
g.Expect(r.SetupWithManager(env.Manager())).To(Succeed())
}
func TestSnowMachineConfigReconcilerSuccess(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
ctx := context.Background()
config := createSnowMachineConfig()
validator := mocks.NewMockValidator(ctrl)
validator.EXPECT().ValidateEC2ImageExistsOnDevice(ctx, config).Return(nil)
validator.EXPECT().ValidateEC2SshKeyNameExists(ctx, config).Return(nil)
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewSnowMachineConfigReconciler(cl, validator)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
snowMachineConfig := &anywherev1.SnowMachineConfig{}
err = cl.Get(ctx, req.NamespacedName, snowMachineConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(snowMachineConfig.Status.FailureMessage).To(BeNil())
g.Expect(snowMachineConfig.Status.SpecValid).To(BeTrue())
}
func TestSnowMachineConfigReconcilerFailureIncorrectObject(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
config := &anywherev1.SnowDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowDatacenterKind,
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
}
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewSnowMachineConfigReconciler(cl, nil)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(HaveOccurred())
}
func TestSnowMachineConfigReconcilerDelete(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
config := createSnowMachineConfig()
config.DeletionTimestamp = &metav1.Time{Time: time.Now()}
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewSnowMachineConfigReconciler(cl, nil)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).NotTo(HaveOccurred())
}
func TestSnowMachineConfigReconcilerFailureImageExists(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
ctx := context.Background()
config := createSnowMachineConfig()
validator := mocks.NewMockValidator(ctrl)
validator.EXPECT().ValidateEC2SshKeyNameExists(ctx, config).Return(nil)
validator.EXPECT().ValidateEC2ImageExistsOnDevice(ctx, config).Return(errors.New("test error"))
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewSnowMachineConfigReconciler(cl, validator)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(HaveOccurred())
snowMachineConfig := &anywherev1.SnowMachineConfig{}
err = cl.Get(ctx, req.NamespacedName, snowMachineConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(snowMachineConfig.Status.FailureMessage).NotTo(BeNil())
g.Expect(snowMachineConfig.Status.SpecValid).To(BeFalse())
}
func TestSnowMachineConfigReconcilerFailureKeyNameExists(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
ctx := context.Background()
config := createSnowMachineConfig()
validator := mocks.NewMockValidator(ctrl)
validator.EXPECT().ValidateEC2ImageExistsOnDevice(ctx, config).Return(nil)
validator.EXPECT().ValidateEC2SshKeyNameExists(ctx, config).Return(errors.New("test error"))
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewSnowMachineConfigReconciler(cl, validator)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(ctx, req)
fmt.Println("test")
fmt.Println(err.Error())
g.Expect(err).To(HaveOccurred())
snowMachineConfig := &anywherev1.SnowMachineConfig{}
err = cl.Get(ctx, req.NamespacedName, snowMachineConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(snowMachineConfig.Status.FailureMessage).NotTo(BeNil())
g.Expect(snowMachineConfig.Status.SpecValid).To(BeFalse())
}
func TestSnowMachineConfigReconcilerFailureAggregate(t *testing.T) {
g := NewWithT(t)
ctrl := gomock.NewController(t)
ctx := context.Background()
config := createSnowMachineConfig()
validator := mocks.NewMockValidator(ctrl)
validator.EXPECT().ValidateEC2ImageExistsOnDevice(ctx, config).Return(errors.New("test error1"))
validator.EXPECT().ValidateEC2SshKeyNameExists(ctx, config).Return(errors.New("test error2"))
objs := []runtime.Object{config}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
r := controllers.NewSnowMachineConfigReconciler(cl, validator)
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(ctx, req)
g.Expect(err).To(HaveOccurred())
// Verify the returned error has the expected prefix and aggregates both validation errors.
errorPrefix := "reconciling snowmachineconfig: "
g.Expect(err.Error()).To(ContainSubstring(errorPrefix))
result := strings.TrimPrefix(err.Error(), errorPrefix)
errs := strings.Split(result, ", ")
g.Expect(len(errs)).To(BeIdenticalTo(2))
snowMachineConfig := &anywherev1.SnowMachineConfig{}
err = cl.Get(ctx, req.NamespacedName, snowMachineConfig)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(snowMachineConfig.Status.SpecValid).To(BeFalse())
// Verify the failure message status aggregates both validation errors.
g.Expect(snowMachineConfig.Status.FailureMessage).NotTo(BeNil())
errs = strings.Split(*snowMachineConfig.Status.FailureMessage, ", ")
g.Expect(len(errs)).To(BeIdenticalTo(2))
}
func createSnowMachineConfig() *anywherev1.SnowMachineConfig {
return &anywherev1.SnowMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowMachineConfigKind,
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
Spec: anywherev1.SnowMachineConfigSpec{
Devices: []string{"test-ip-1", "test-ip-2"},
AMIID: "test-ami",
SshKeyName: "test-key",
},
}
}
| 258 |
eks-anywhere | aws | Go | package controllers
import (
"context"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
// TinkerbellDatacenterReconciler reconciles a TinkerbellDatacenterConfig object.
type TinkerbellDatacenterReconciler struct {
client client.Client
}
// NewTinkerbellDatacenterReconciler creates a new instance of the TinkerbellDatacenterReconciler struct.
func NewTinkerbellDatacenterReconciler(client client.Client) *TinkerbellDatacenterReconciler {
return &TinkerbellDatacenterReconciler{
client: client,
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *TinkerbellDatacenterReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&anywherev1.TinkerbellDatacenterConfig{}).
Complete(r)
}
// TODO: add here kubebuilder permissions as needed.
// Reconcile implements the reconcile.Reconciler interface.
func (r *TinkerbellDatacenterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
// TODO fetch Tinkerbell datacenter object and implement reconcile
return ctrl.Result{}, nil
}
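// Illustrative sketch (hypothetical; the fetch above is still a TODO):
// a full implementation would typically mirror the sibling reconcilers:
//
// dc := &anywherev1.TinkerbellDatacenterConfig{}
// if err := r.client.Get(ctx, req.NamespacedName, dc); err != nil {
// return ctrl.Result{}, err
// }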
| 38 |
eks-anywhere | aws | Go | package controllers_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/controllers"
)
func TestTinkerbellDatacenterReconcilerSetupWithManager(t *testing.T) {
client := env.Client()
r := controllers.NewTinkerbellDatacenterReconciler(client)
g := NewWithT(t)
g.Expect(r.SetupWithManager(env.Manager())).To(Succeed())
}
| 18 |
eks-anywhere | aws | Go | package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/reconciler"
)
// VSphereDatacenterReconciler reconciles a VSphereDatacenterConfig object.
type VSphereDatacenterReconciler struct {
client client.Client
defaulter *vsphere.Defaulter
validator *vsphere.Validator
}
// NewVSphereDatacenterReconciler constructs a new VSphereDatacenterReconciler.
func NewVSphereDatacenterReconciler(client client.Client, validator *vsphere.Validator, defaulter *vsphere.Defaulter) *VSphereDatacenterReconciler {
return &VSphereDatacenterReconciler{
client: client,
validator: validator,
defaulter: defaulter,
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *VSphereDatacenterReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&anywherev1.VSphereDatacenterConfig{}).
Complete(r)
}
// TODO: add here kubebuilder permissions as needed.
// Reconcile implements the reconcile.Reconciler interface.
func (r *VSphereDatacenterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
log := ctrl.LoggerFrom(ctx)
// Fetch the VsphereDatacenter object
vsphereDatacenter := &anywherev1.VSphereDatacenterConfig{}
if err := r.client.Get(ctx, req.NamespacedName, vsphereDatacenter); err != nil {
return ctrl.Result{}, err
}
// Initialize the patch helper
patchHelper, err := patch.NewHelper(vsphereDatacenter, r.client)
if err != nil {
return ctrl.Result{}, err
}
defer func() {
// Always attempt to patch the object and status after each reconciliation.
patchOpts := []patch.Option{}
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
}
if err := patchHelper.Patch(ctx, vsphereDatacenter, patchOpts...); err != nil {
log.Error(err, "Failed to patch vspheredatacenterconfig")
reterr = kerrors.NewAggregate([]error{reterr, err})
}
}()
// There's no need to go any further if the VsphereDatacenterConfig is marked for deletion.
if !vsphereDatacenter.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, vsphereDatacenter, log)
}
result, err := r.reconcile(ctx, vsphereDatacenter, log)
if err != nil {
log.Error(err, "Failed to reconcile VsphereDatacenterConfig")
}
return result, err
}
func (r *VSphereDatacenterReconciler) reconcile(ctx context.Context, vsphereDatacenter *anywherev1.VSphereDatacenterConfig, log logr.Logger) (_ ctrl.Result, reterr error) {
// Set up env vars for executing the govc command and apply default values to the datacenter config
if err := reconciler.SetupEnvVars(ctx, vsphereDatacenter, r.client); err != nil {
log.Error(err, "Failed to set up env vars and default values for VsphereDatacenterConfig")
return ctrl.Result{}, err
}
if err := r.defaulter.SetDefaultsForDatacenterConfig(ctx, vsphereDatacenter); err != nil {
return ctrl.Result{}, fmt.Errorf("failed setting default values for vsphere datacenter config: %v", err)
}
// Determine if VsphereDatacenterConfig is valid
if err := r.validator.ValidateVCenterConfig(ctx, vsphereDatacenter); err != nil {
log.Error(err, "Failed to validate VsphereDatacenterConfig")
return ctrl.Result{}, err
}
vsphereDatacenter.Status.SpecValid = true
return ctrl.Result{}, nil
}
func (r *VSphereDatacenterReconciler) reconcileDelete(ctx context.Context, vsphereDatacenter *anywherev1.VSphereDatacenterConfig, log logr.Logger) (ctrl.Result, error) {
return ctrl.Result{}, nil
}
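// Illustrative note (sketch, not upstream code): the deferred patch-helper
// pattern above snapshots the object when Reconcile starts and patches the
// diff on every return path, so status changes such as Status.SpecValid are
// always persisted. The minimal shape for any client.Object is:
//
// helper, err := patch.NewHelper(obj, c)
// if err != nil {
// return err
// }
// defer func() {
// if perr := helper.Patch(ctx, obj); perr != nil {
// reterr = kerrors.NewAggregate([]error{reterr, perr})
// }
// }()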
| 105 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: controllers/cluster_controller.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
controller "github.com/aws/eks-anywhere/pkg/controller"
clusters "github.com/aws/eks-anywhere/pkg/controller/clusters"
curatedpackages "github.com/aws/eks-anywhere/pkg/curatedpackages"
registrymirror "github.com/aws/eks-anywhere/pkg/registrymirror"
v1alpha10 "github.com/aws/eks-anywhere/release/api/v1alpha1"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockPackagesClient is a mock of PackagesClient interface.
type MockPackagesClient struct {
ctrl *gomock.Controller
recorder *MockPackagesClientMockRecorder
}
// MockPackagesClientMockRecorder is the mock recorder for MockPackagesClient.
type MockPackagesClientMockRecorder struct {
mock *MockPackagesClient
}
// NewMockPackagesClient creates a new mock instance.
func NewMockPackagesClient(ctrl *gomock.Controller) *MockPackagesClient {
mock := &MockPackagesClient{ctrl: ctrl}
mock.recorder = &MockPackagesClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPackagesClient) EXPECT() *MockPackagesClientMockRecorder {
return m.recorder
}
// EnableFullLifecycle mocks base method.
func (m *MockPackagesClient) EnableFullLifecycle(ctx context.Context, log logr.Logger, clusterName, kubeConfig string, chart *v1alpha10.Image, registry *registrymirror.RegistryMirror, options ...curatedpackages.PackageControllerClientOpt) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, log, clusterName, kubeConfig, chart, registry}
for _, a := range options {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "EnableFullLifecycle", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// EnableFullLifecycle indicates an expected call of EnableFullLifecycle.
func (mr *MockPackagesClientMockRecorder) EnableFullLifecycle(ctx, log, clusterName, kubeConfig, chart, registry interface{}, options ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, log, clusterName, kubeConfig, chart, registry}, options...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableFullLifecycle", reflect.TypeOf((*MockPackagesClient)(nil).EnableFullLifecycle), varargs...)
}
// Reconcile mocks base method.
func (m *MockPackagesClient) Reconcile(arg0 context.Context, arg1 logr.Logger, arg2 client.Client, arg3 *v1alpha1.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockPackagesClientMockRecorder) Reconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockPackagesClient)(nil).Reconcile), arg0, arg1, arg2, arg3)
}
// ReconcileDelete mocks base method.
func (m *MockPackagesClient) ReconcileDelete(arg0 context.Context, arg1 logr.Logger, arg2 curatedpackages.KubeDeleter, arg3 *v1alpha1.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReconcileDelete", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ReconcileDelete indicates an expected call of ReconcileDelete.
func (mr *MockPackagesClientMockRecorder) ReconcileDelete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileDelete", reflect.TypeOf((*MockPackagesClient)(nil).ReconcileDelete), arg0, arg1, arg2, arg3)
}
// MockProviderClusterReconcilerRegistry is a mock of ProviderClusterReconcilerRegistry interface.
type MockProviderClusterReconcilerRegistry struct {
ctrl *gomock.Controller
recorder *MockProviderClusterReconcilerRegistryMockRecorder
}
// MockProviderClusterReconcilerRegistryMockRecorder is the mock recorder for MockProviderClusterReconcilerRegistry.
type MockProviderClusterReconcilerRegistryMockRecorder struct {
mock *MockProviderClusterReconcilerRegistry
}
// NewMockProviderClusterReconcilerRegistry creates a new mock instance.
func NewMockProviderClusterReconcilerRegistry(ctrl *gomock.Controller) *MockProviderClusterReconcilerRegistry {
mock := &MockProviderClusterReconcilerRegistry{ctrl: ctrl}
mock.recorder = &MockProviderClusterReconcilerRegistryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderClusterReconcilerRegistry) EXPECT() *MockProviderClusterReconcilerRegistryMockRecorder {
return m.recorder
}
// Get mocks base method.
func (m *MockProviderClusterReconcilerRegistry) Get(datacenterKind string) clusters.ProviderClusterReconciler {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", datacenterKind)
ret0, _ := ret[0].(clusters.ProviderClusterReconciler)
return ret0
}
// Get indicates an expected call of Get.
func (mr *MockProviderClusterReconcilerRegistryMockRecorder) Get(datacenterKind interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockProviderClusterReconcilerRegistry)(nil).Get), datacenterKind)
}
// MockAWSIamConfigReconciler is a mock of AWSIamConfigReconciler interface.
type MockAWSIamConfigReconciler struct {
ctrl *gomock.Controller
recorder *MockAWSIamConfigReconcilerMockRecorder
}
// MockAWSIamConfigReconcilerMockRecorder is the mock recorder for MockAWSIamConfigReconciler.
type MockAWSIamConfigReconcilerMockRecorder struct {
mock *MockAWSIamConfigReconciler
}
// NewMockAWSIamConfigReconciler creates a new mock instance.
func NewMockAWSIamConfigReconciler(ctrl *gomock.Controller) *MockAWSIamConfigReconciler {
mock := &MockAWSIamConfigReconciler{ctrl: ctrl}
mock.recorder = &MockAWSIamConfigReconcilerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockAWSIamConfigReconciler) EXPECT() *MockAWSIamConfigReconcilerMockRecorder {
return m.recorder
}
// EnsureCASecret mocks base method.
func (m *MockAWSIamConfigReconciler) EnsureCASecret(ctx context.Context, logger logr.Logger, cluster *v1alpha1.Cluster) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnsureCASecret", ctx, logger, cluster)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EnsureCASecret indicates an expected call of EnsureCASecret.
func (mr *MockAWSIamConfigReconcilerMockRecorder) EnsureCASecret(ctx, logger, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureCASecret", reflect.TypeOf((*MockAWSIamConfigReconciler)(nil).EnsureCASecret), ctx, logger, cluster)
}
// Reconcile mocks base method.
func (m *MockAWSIamConfigReconciler) Reconcile(ctx context.Context, logger logr.Logger, cluster *v1alpha1.Cluster) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", ctx, logger, cluster)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockAWSIamConfigReconcilerMockRecorder) Reconcile(ctx, logger, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockAWSIamConfigReconciler)(nil).Reconcile), ctx, logger, cluster)
}
// ReconcileDelete mocks base method.
func (m *MockAWSIamConfigReconciler) ReconcileDelete(ctx context.Context, logger logr.Logger, cluster *v1alpha1.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReconcileDelete", ctx, logger, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// ReconcileDelete indicates an expected call of ReconcileDelete.
func (mr *MockAWSIamConfigReconcilerMockRecorder) ReconcileDelete(ctx, logger, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileDelete", reflect.TypeOf((*MockAWSIamConfigReconciler)(nil).ReconcileDelete), ctx, logger, cluster)
}
// MockClusterValidator is a mock of ClusterValidator interface.
type MockClusterValidator struct {
ctrl *gomock.Controller
recorder *MockClusterValidatorMockRecorder
}
// MockClusterValidatorMockRecorder is the mock recorder for MockClusterValidator.
type MockClusterValidatorMockRecorder struct {
mock *MockClusterValidator
}
// NewMockClusterValidator creates a new mock instance.
func NewMockClusterValidator(ctrl *gomock.Controller) *MockClusterValidator {
mock := &MockClusterValidator{ctrl: ctrl}
mock.recorder = &MockClusterValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClusterValidator) EXPECT() *MockClusterValidatorMockRecorder {
return m.recorder
}
// ValidateManagementClusterName mocks base method.
func (m *MockClusterValidator) ValidateManagementClusterName(ctx context.Context, log logr.Logger, cluster *v1alpha1.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateManagementClusterName", ctx, log, cluster)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateManagementClusterName indicates an expected call of ValidateManagementClusterName.
func (mr *MockClusterValidatorMockRecorder) ValidateManagementClusterName(ctx, log, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateManagementClusterName", reflect.TypeOf((*MockClusterValidator)(nil).ValidateManagementClusterName), ctx, log, cluster)
}
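// Illustrative usage (not generated): a minimal sketch of a test that drives
// MockClusterValidator through the standard gomock workflow. The test name and
// expected values are hypothetical, and the "testing" package is assumed to be
// imported alongside this file's dependencies.
//
//	func TestValidateManagementClusterName(t *testing.T) {
//		ctrl := gomock.NewController(t)
//		validator := NewMockClusterValidator(ctrl)
//		validator.EXPECT().
//			ValidateManagementClusterName(gomock.Any(), gomock.Any(), gomock.Any()).
//			Return(nil)
//		err := validator.ValidateManagementClusterName(context.Background(), logr.Discard(), &v1alpha1.Cluster{})
//		if err != nil {
//			t.Fatalf("expected no error, got %v", err)
//		}
//	}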
| 232 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/controllers (interfaces: Manager)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
http "net/http"
reflect "reflect"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
meta "k8s.io/apimachinery/pkg/api/meta"
runtime "k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
record "k8s.io/client-go/tools/record"
cache "sigs.k8s.io/controller-runtime/pkg/cache"
client "sigs.k8s.io/controller-runtime/pkg/client"
v1alpha1 "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
healthz "sigs.k8s.io/controller-runtime/pkg/healthz"
manager "sigs.k8s.io/controller-runtime/pkg/manager"
webhook "sigs.k8s.io/controller-runtime/pkg/webhook"
)
// MockManager is a mock of Manager interface.
type MockManager struct {
ctrl *gomock.Controller
recorder *MockManagerMockRecorder
}
// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
mock *MockManager
}
// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
mock := &MockManager{ctrl: ctrl}
mock.recorder = &MockManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
return m.recorder
}
// Add mocks base method.
func (m *MockManager) Add(arg0 manager.Runnable) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Add", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Add indicates an expected call of Add.
func (mr *MockManagerMockRecorder) Add(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockManager)(nil).Add), arg0)
}
// AddHealthzCheck mocks base method.
func (m *MockManager) AddHealthzCheck(arg0 string, arg1 healthz.Checker) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddHealthzCheck", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// AddHealthzCheck indicates an expected call of AddHealthzCheck.
func (mr *MockManagerMockRecorder) AddHealthzCheck(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHealthzCheck", reflect.TypeOf((*MockManager)(nil).AddHealthzCheck), arg0, arg1)
}
// AddMetricsExtraHandler mocks base method.
func (m *MockManager) AddMetricsExtraHandler(arg0 string, arg1 http.Handler) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddMetricsExtraHandler", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// AddMetricsExtraHandler indicates an expected call of AddMetricsExtraHandler.
func (mr *MockManagerMockRecorder) AddMetricsExtraHandler(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMetricsExtraHandler", reflect.TypeOf((*MockManager)(nil).AddMetricsExtraHandler), arg0, arg1)
}
// AddReadyzCheck mocks base method.
func (m *MockManager) AddReadyzCheck(arg0 string, arg1 healthz.Checker) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddReadyzCheck", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// AddReadyzCheck indicates an expected call of AddReadyzCheck.
func (mr *MockManagerMockRecorder) AddReadyzCheck(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddReadyzCheck", reflect.TypeOf((*MockManager)(nil).AddReadyzCheck), arg0, arg1)
}
// Elected mocks base method.
func (m *MockManager) Elected() <-chan struct{} {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Elected")
ret0, _ := ret[0].(<-chan struct{})
return ret0
}
// Elected indicates an expected call of Elected.
func (mr *MockManagerMockRecorder) Elected() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Elected", reflect.TypeOf((*MockManager)(nil).Elected))
}
// GetAPIReader mocks base method.
func (m *MockManager) GetAPIReader() client.Reader {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAPIReader")
ret0, _ := ret[0].(client.Reader)
return ret0
}
// GetAPIReader indicates an expected call of GetAPIReader.
func (mr *MockManagerMockRecorder) GetAPIReader() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAPIReader", reflect.TypeOf((*MockManager)(nil).GetAPIReader))
}
// GetCache mocks base method.
func (m *MockManager) GetCache() cache.Cache {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCache")
ret0, _ := ret[0].(cache.Cache)
return ret0
}
// GetCache indicates an expected call of GetCache.
func (mr *MockManagerMockRecorder) GetCache() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCache", reflect.TypeOf((*MockManager)(nil).GetCache))
}
// GetClient mocks base method.
func (m *MockManager) GetClient() client.Client {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClient")
ret0, _ := ret[0].(client.Client)
return ret0
}
// GetClient indicates an expected call of GetClient.
func (mr *MockManagerMockRecorder) GetClient() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockManager)(nil).GetClient))
}
// GetConfig mocks base method.
func (m *MockManager) GetConfig() *rest.Config {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetConfig")
ret0, _ := ret[0].(*rest.Config)
return ret0
}
// GetConfig indicates an expected call of GetConfig.
func (mr *MockManagerMockRecorder) GetConfig() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfig", reflect.TypeOf((*MockManager)(nil).GetConfig))
}
// GetControllerOptions mocks base method.
func (m *MockManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetControllerOptions")
ret0, _ := ret[0].(v1alpha1.ControllerConfigurationSpec)
return ret0
}
// GetControllerOptions indicates an expected call of GetControllerOptions.
func (mr *MockManagerMockRecorder) GetControllerOptions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetControllerOptions", reflect.TypeOf((*MockManager)(nil).GetControllerOptions))
}
// GetEventRecorderFor mocks base method.
func (m *MockManager) GetEventRecorderFor(arg0 string) record.EventRecorder {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEventRecorderFor", arg0)
ret0, _ := ret[0].(record.EventRecorder)
return ret0
}
// GetEventRecorderFor indicates an expected call of GetEventRecorderFor.
func (mr *MockManagerMockRecorder) GetEventRecorderFor(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventRecorderFor", reflect.TypeOf((*MockManager)(nil).GetEventRecorderFor), arg0)
}
// GetFieldIndexer mocks base method.
func (m *MockManager) GetFieldIndexer() client.FieldIndexer {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetFieldIndexer")
ret0, _ := ret[0].(client.FieldIndexer)
return ret0
}
// GetFieldIndexer indicates an expected call of GetFieldIndexer.
func (mr *MockManagerMockRecorder) GetFieldIndexer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFieldIndexer", reflect.TypeOf((*MockManager)(nil).GetFieldIndexer))
}
// GetLogger mocks base method.
func (m *MockManager) GetLogger() logr.Logger {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetLogger")
ret0, _ := ret[0].(logr.Logger)
return ret0
}
// GetLogger indicates an expected call of GetLogger.
func (mr *MockManagerMockRecorder) GetLogger() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockManager)(nil).GetLogger))
}
// GetRESTMapper mocks base method.
func (m *MockManager) GetRESTMapper() meta.RESTMapper {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRESTMapper")
ret0, _ := ret[0].(meta.RESTMapper)
return ret0
}
// GetRESTMapper indicates an expected call of GetRESTMapper.
func (mr *MockManagerMockRecorder) GetRESTMapper() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRESTMapper", reflect.TypeOf((*MockManager)(nil).GetRESTMapper))
}
// GetScheme mocks base method.
func (m *MockManager) GetScheme() *runtime.Scheme {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetScheme")
ret0, _ := ret[0].(*runtime.Scheme)
return ret0
}
// GetScheme indicates an expected call of GetScheme.
func (mr *MockManagerMockRecorder) GetScheme() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheme", reflect.TypeOf((*MockManager)(nil).GetScheme))
}
// GetWebhookServer mocks base method.
func (m *MockManager) GetWebhookServer() *webhook.Server {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetWebhookServer")
ret0, _ := ret[0].(*webhook.Server)
return ret0
}
// GetWebhookServer indicates an expected call of GetWebhookServer.
func (mr *MockManagerMockRecorder) GetWebhookServer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWebhookServer", reflect.TypeOf((*MockManager)(nil).GetWebhookServer))
}
// SetFields mocks base method.
func (m *MockManager) SetFields(arg0 interface{}) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetFields", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetFields indicates an expected call of SetFields.
func (mr *MockManagerMockRecorder) SetFields(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFields", reflect.TypeOf((*MockManager)(nil).SetFields), arg0)
}
// Start mocks base method.
func (m *MockManager) Start(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Start", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Start indicates an expected call of Start.
func (mr *MockManagerMockRecorder) Start(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockManager)(nil).Start), arg0)
}
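// Illustrative usage (not generated): a minimal sketch of handing MockManager
// to code that registers a controller, stubbing only the getters that code is
// expected to touch. The specific expectations below are hypothetical.
//
//	mgr := NewMockManager(ctrl)
//	mgr.EXPECT().GetLogger().Return(logr.Discard()).AnyTimes()
//	mgr.EXPECT().GetScheme().Return(runtime.NewScheme()).AnyTimes()
//	mgr.EXPECT().Add(gomock.Any()).Return(nil)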
| 300 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/controller/clusters/registry.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
controller "github.com/aws/eks-anywhere/pkg/controller"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
)
// MockProviderClusterReconciler is a mock of ProviderClusterReconciler interface.
type MockProviderClusterReconciler struct {
ctrl *gomock.Controller
recorder *MockProviderClusterReconcilerMockRecorder
}
// MockProviderClusterReconcilerMockRecorder is the mock recorder for MockProviderClusterReconciler.
type MockProviderClusterReconcilerMockRecorder struct {
mock *MockProviderClusterReconciler
}
// NewMockProviderClusterReconciler creates a new mock instance.
func NewMockProviderClusterReconciler(ctrl *gomock.Controller) *MockProviderClusterReconciler {
mock := &MockProviderClusterReconciler{ctrl: ctrl}
mock.recorder = &MockProviderClusterReconcilerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderClusterReconciler) EXPECT() *MockProviderClusterReconcilerMockRecorder {
return m.recorder
}
// Reconcile mocks base method.
func (m *MockProviderClusterReconciler) Reconcile(ctx context.Context, log logr.Logger, cluster *v1alpha1.Cluster) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", ctx, log, cluster)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockProviderClusterReconcilerMockRecorder) Reconcile(ctx, log, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockProviderClusterReconciler)(nil).Reconcile), ctx, log, cluster)
}
// ReconcileWorkerNodes mocks base method.
func (m *MockProviderClusterReconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *v1alpha1.Cluster) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReconcileWorkerNodes", ctx, log, cluster)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReconcileWorkerNodes indicates an expected call of ReconcileWorkerNodes.
func (mr *MockProviderClusterReconcilerMockRecorder) ReconcileWorkerNodes(ctx, log, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileWorkerNodes", reflect.TypeOf((*MockProviderClusterReconciler)(nil).ReconcileWorkerNodes), ctx, log, cluster)
}
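// Illustrative usage (not generated): a minimal sketch of asserting that a
// provider reconciler is invoked exactly once and reports success; the return
// values are hypothetical.
//
//	reconciler := NewMockProviderClusterReconciler(ctrl)
//	reconciler.EXPECT().
//		Reconcile(gomock.Any(), gomock.Any(), gomock.Any()).
//		Return(controller.Result{}, nil).
//		Times(1)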
| 69 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: controllers/snow_machineconfig_controller.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockValidator is a mock of Validator interface.
type MockValidator struct {
ctrl *gomock.Controller
recorder *MockValidatorMockRecorder
}
// MockValidatorMockRecorder is the mock recorder for MockValidator.
type MockValidatorMockRecorder struct {
mock *MockValidator
}
// NewMockValidator creates a new mock instance.
func NewMockValidator(ctrl *gomock.Controller) *MockValidator {
mock := &MockValidator{ctrl: ctrl}
mock.recorder = &MockValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockValidator) EXPECT() *MockValidatorMockRecorder {
return m.recorder
}
// ValidateEC2ImageExistsOnDevice mocks base method.
func (m_2 *MockValidator) ValidateEC2ImageExistsOnDevice(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "ValidateEC2ImageExistsOnDevice", ctx, m)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateEC2ImageExistsOnDevice indicates an expected call of ValidateEC2ImageExistsOnDevice.
func (mr *MockValidatorMockRecorder) ValidateEC2ImageExistsOnDevice(ctx, m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateEC2ImageExistsOnDevice", reflect.TypeOf((*MockValidator)(nil).ValidateEC2ImageExistsOnDevice), ctx, m)
}
// ValidateEC2SshKeyNameExists mocks base method.
func (m_2 *MockValidator) ValidateEC2SshKeyNameExists(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "ValidateEC2SshKeyNameExists", ctx, m)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateEC2SshKeyNameExists indicates an expected call of ValidateEC2SshKeyNameExists.
func (mr *MockValidatorMockRecorder) ValidateEC2SshKeyNameExists(ctx, m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateEC2SshKeyNameExists", reflect.TypeOf((*MockValidator)(nil).ValidateEC2SshKeyNameExists), ctx, m)
}
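// Illustrative usage (not generated): a minimal sketch of simulating a device
// validation failure so error-handling paths can be exercised; the error
// message is hypothetical.
//
//	validator := NewMockValidator(ctrl)
//	validator.EXPECT().
//		ValidateEC2ImageExistsOnDevice(gomock.Any(), gomock.Any()).
//		Return(errors.New("image not found on device"))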
| 65 |
eks-anywhere | aws | Go | package configsources
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
)
// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration values
// for Enable Endpoint Discovery.
type EnableEndpointDiscoveryProvider interface {
GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error)
}
// ResolveEnableEndpointDiscovery extracts the first instance of an EnableEndpointDiscoveryProvider from the config slice.
// Additionally returns a boolean to indicate if the value was found in the provided configs,
// and an error if one is encountered.
func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
for _, cfg := range configs {
if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok {
value, found, err = p.GetEnableEndpointDiscovery(ctx)
if err != nil || found {
break
}
}
}
return
}
// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint.
type UseDualStackEndpointProvider interface {
GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error)
}
// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpointProvider from the config slice.
// Additionally returns a boolean to indicate if the value was found in the provided configs, and an error if one is encountered.
func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) {
for _, cfg := range configs {
if p, ok := cfg.(UseDualStackEndpointProvider); ok {
value, found, err = p.GetUseDualStackEndpoint(ctx)
if err != nil || found {
break
}
}
}
return
}
// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint.
type UseFIPSEndpointProvider interface {
GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error)
}
// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice.
// Additionally returns a boolean to indicate if the value was found in the provided configs, and an error if one is encountered.
func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) {
for _, cfg := range configs {
if p, ok := cfg.(UseFIPSEndpointProvider); ok {
value, found, err = p.GetUseFIPSEndpoint(ctx)
if err != nil || found {
break
}
}
}
return
}
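// Illustrative usage (not part of the module): a minimal sketch of a config
// source that satisfies UseFIPSEndpointProvider, so ResolveUseFIPSEndpoint can
// pick its value out of a mixed config slice. The staticFIPSConfig type is
// hypothetical.
type staticFIPSConfig struct {
	state aws.FIPSEndpointState
}

// GetUseFIPSEndpoint reports the configured FIPS endpoint state as found.
func (c staticFIPSConfig) GetUseFIPSEndpoint(context.Context) (aws.FIPSEndpointState, bool, error) {
	return c.state, true, nil
}

// With this in place, ResolveUseFIPSEndpoint(ctx, []interface{}{staticFIPSConfig{state: aws.FIPSEndpointStateEnabled}})
// would return (aws.FIPSEndpointStateEnabled, true, nil).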
| 67 |
eks-anywhere | aws | Go | // Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package configsources
// goModuleVersion is the tagged release for this module.
const goModuleVersion = "1.1.21"
| 7 |
eks-anywhere | aws | Go | package configtesting
import (
"github.com/aws/aws-sdk-go-v2/config"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
)
// EnableEndpointDiscoveryProvider Assertions.
var (
_ internalConfig.EnableEndpointDiscoveryProvider = &config.EnvConfig{}
_ internalConfig.EnableEndpointDiscoveryProvider = &config.SharedConfig{}
)
// UseDualStackEndpointProvider Assertions.
var (
_ internalConfig.UseDualStackEndpointProvider = &config.EnvConfig{}
_ internalConfig.UseDualStackEndpointProvider = &config.SharedConfig{}
_ internalConfig.UseDualStackEndpointProvider = &config.LoadOptions{}
)
// UseFIPSEndpointProvider Assertions.
var (
_ internalConfig.UseFIPSEndpointProvider = &config.EnvConfig{}
_ internalConfig.UseFIPSEndpointProvider = &config.SharedConfig{}
_ internalConfig.UseFIPSEndpointProvider = &config.LoadOptions{}
)
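// The blank-identifier assignments above are compile-time assertions: the build
// breaks if a config type stops implementing the corresponding provider
// interface. A minimal sketch of the same pattern for a hypothetical custom
// config type (which would need to implement GetUseFIPSEndpoint for the
// assertion to compile):
//
//	type customConfig struct{}
//
//	var _ internalConfig.UseFIPSEndpointProvider = &customConfig{}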
| 33 |
eks-anywhere | aws | Go | // Package configtesting is used for config source assertions.
package configtesting
| 3 |
eks-anywhere | aws | Go | // Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package configtesting
// goModuleVersion is the tagged release for this module.
const goModuleVersion = "tip"
| 7 |
eks-anywhere | aws | Go | package endpoints
import (
"fmt"
"regexp"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
)
const (
defaultProtocol = "https"
defaultSigner = "v4"
)
var (
protocolPriority = []string{"https", "http"}
signerPriority = []string{"v4"}
)
// Options provide configuration needed to direct how endpoints are resolved.
type Options struct {
// Disable usage of HTTPS (TLS / SSL)
DisableHTTPS bool
}
// Partitions is a slice of partition.
type Partitions []Partition
// ResolveEndpoint resolves a service endpoint for the given region and options.
func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
if len(ps) == 0 {
return aws.Endpoint{}, fmt.Errorf("no partitions found")
}
for i := 0; i < len(ps); i++ {
if !ps[i].canResolveEndpoint(region) {
continue
}
return ps[i].ResolveEndpoint(region, opts)
}
// Fall back to the first partition's format when resolving the endpoint.
return ps[0].ResolveEndpoint(region, opts)
}
// Partition is an AWS partition description for a service and its region endpoints.
type Partition struct {
ID string
RegionRegex *regexp.Regexp
PartitionEndpoint string
IsRegionalized bool
Defaults Endpoint
Endpoints Endpoints
}
func (p Partition) canResolveEndpoint(region string) bool {
_, ok := p.Endpoints[region]
return ok || p.RegionRegex.MatchString(region)
}
// ResolveEndpoint resolves a service endpoint for the given region and options.
func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
region = p.PartitionEndpoint
}
e, _ := p.endpointForRegion(region)
return e.resolve(p.ID, region, p.Defaults, options), nil
}
func (p Partition) endpointForRegion(region string) (Endpoint, bool) {
if e, ok := p.Endpoints[region]; ok {
return e, true
}
if !p.IsRegionalized {
return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint
}
// Unable to find any matching endpoint; return a
// blank Endpoint that will be used for generic endpoint creation.
return Endpoint{}, false
}
// Endpoints is a map of service config regions to endpoints.
type Endpoints map[string]Endpoint
// CredentialScope is the credential scope of a region and service.
type CredentialScope struct {
Region string
Service string
}
// Endpoint is a service endpoint description.
type Endpoint struct {
// True if the endpoint cannot be resolved for this partition/region/service
Unresolveable aws.Ternary
Hostname string
Protocols []string
CredentialScope CredentialScope
SignatureVersions []string `json:"signatureVersions"`
}
func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint {
var merged Endpoint
merged.mergeIn(def)
merged.mergeIn(e)
e = merged
var u string
if e.Unresolveable != aws.TrueTernary {
// Only attempt to resolve the endpoint if it can be resolved.
hostname := strings.Replace(e.Hostname, "{region}", region, 1)
scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
u = scheme + "://" + hostname
}
signingRegion := e.CredentialScope.Region
if len(signingRegion) == 0 {
signingRegion = region
}
signingName := e.CredentialScope.Service
return aws.Endpoint{
URL: u,
PartitionID: partition,
SigningRegion: signingRegion,
SigningName: signingName,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
}
}
func (e *Endpoint) mergeIn(other Endpoint) {
if other.Unresolveable != aws.UnknownTernary {
e.Unresolveable = other.Unresolveable
}
if len(other.Hostname) > 0 {
e.Hostname = other.Hostname
}
if len(other.Protocols) > 0 {
e.Protocols = other.Protocols
}
if len(other.CredentialScope.Region) > 0 {
e.CredentialScope.Region = other.CredentialScope.Region
}
if len(other.CredentialScope.Service) > 0 {
e.CredentialScope.Service = other.CredentialScope.Service
}
if len(other.SignatureVersions) > 0 {
e.SignatureVersions = other.SignatureVersions
}
}
func getEndpointScheme(protocols []string, disableHTTPS bool) string {
if disableHTTPS {
return "http"
}
return getByPriority(protocols, protocolPriority, defaultProtocol)
}
func getByPriority(s []string, p []string, def string) string {
if len(s) == 0 {
return def
}
for i := 0; i < len(p); i++ {
for j := 0; j < len(s); j++ {
if s[j] == p[i] {
return s[j]
}
}
}
return s[0]
}
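// Worked example (illustrative): getByPriority([]string{"http", "https"},
// protocolPriority, defaultProtocol) returns "https", because "https" precedes
// "http" in protocolPriority. An empty candidate slice yields the default
// ("https"), and a slice with no prioritized entries falls back to its first
// element.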
| 184 |
eks-anywhere | aws | Go | package endpoints
import (
"reflect"
"regexp"
"testing"
"github.com/aws/aws-sdk-go-v2/aws"
)
func TestEndpointResolve(t *testing.T) {
defs := Endpoint{
Hostname: "service.{region}.amazonaws.com",
SignatureVersions: []string{"v4"},
}
e := Endpoint{
Protocols: []string{"http", "https"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Region: "us-west-2",
Service: "service",
},
}
resolved := e.resolve("aws", "us-west-2", defs, Options{})
if e, a := "https://service.us-west-2.amazonaws.com", resolved.URL; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "aws", resolved.PartitionID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "service", resolved.SigningName; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "us-west-2", resolved.SigningRegion; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "v4", resolved.SigningMethod; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestEndpointMergeIn(t *testing.T) {
expected := Endpoint{
Hostname: "other hostname",
Protocols: []string{"http"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Region: "region",
Service: "service",
},
}
actual := Endpoint{}
actual.mergeIn(Endpoint{
Hostname: "other hostname",
Protocols: []string{"http"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Region: "region",
Service: "service",
},
})
if e, a := expected, actual; !reflect.DeepEqual(e, a) {
t.Errorf("expect %v, got %v", e, a)
}
}
var testPartitions = Partitions{
{
ID: "part-id-1",
RegionRegex: func() *regexp.Regexp {
reg, _ := regexp.Compile("^(us)\\-\\w+\\-\\d+$")
return reg
}(),
Defaults: Endpoint{
Hostname: "service.{region}.amazonaws.com",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
IsRegionalized: true,
Endpoints: Endpoints{
"us-west-1": {},
"us-west-1-alt": {
Hostname: "service-alt.us-west-1.amazonaws.com",
Protocols: []string{"http"},
SignatureVersions: []string{"vFoo"},
CredentialScope: CredentialScope{
Region: "us-west-1",
Service: "foo",
},
},
},
},
{
ID: "part-id-2",
RegionRegex: func() *regexp.Regexp {
reg, _ := regexp.Compile("^(cn)\\-\\w+\\-\\d+$")
return reg
}(),
Defaults: Endpoint{
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Service: "foo",
},
},
IsRegionalized: false,
PartitionEndpoint: "partition",
Endpoints: Endpoints{
"partition": {
Hostname: "some-global-thing.amazonaws.com.cn",
CredentialScope: CredentialScope{
Region: "cn-east-1",
},
},
"fips-partition": {
Hostname: "some-global-thing-fips.amazonaws.com.cn",
CredentialScope: CredentialScope{
Region: "cn-east-1",
},
},
},
},
{
ID: "part-id-3",
RegionRegex: func() *regexp.Regexp {
reg, _ := regexp.Compile("^(eu)\\-\\w+\\-\\d+$")
return reg
}(),
Defaults: Endpoint{
Hostname: "service.{region}.amazonaws.com",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Service: "foo",
},
},
IsRegionalized: true,
},
}
func TestResolveEndpoint(t *testing.T) {
cases := map[string]struct {
Region string
Options Options
Expected aws.Endpoint
}{
"modeled region with no endpoint overrides": {
Region: "us-west-1",
Expected: aws.Endpoint{
PartitionID: "part-id-1",
URL: "https://service.us-west-1.amazonaws.com",
SigningRegion: "us-west-1",
SigningMethod: "v4",
},
},
"modeled region with no endpoint overrides and https disabled": {
Region: "us-west-1",
Options: Options{DisableHTTPS: true},
Expected: aws.Endpoint{
PartitionID: "part-id-1",
URL: "http://service.us-west-1.amazonaws.com",
SigningRegion: "us-west-1",
SigningMethod: "v4",
},
},
"modeled region with endpoint overrides": {
Region: "us-west-1-alt",
Expected: aws.Endpoint{
PartitionID: "part-id-1",
URL: "http://service-alt.us-west-1.amazonaws.com",
SigningRegion: "us-west-1",
SigningName: "foo",
SigningMethod: "vFoo",
},
},
"partition endpoint": {
Region: "cn-central-1",
Expected: aws.Endpoint{
PartitionID: "part-id-2",
URL: "https://some-global-thing.amazonaws.com.cn",
SigningRegion: "cn-east-1",
SigningName: "foo",
SigningMethod: "v4",
},
},
"specified partition endpoint": {
Region: "partition",
Expected: aws.Endpoint{
PartitionID: "part-id-2",
URL: "https://some-global-thing.amazonaws.com.cn",
SigningRegion: "cn-east-1",
SigningName: "foo",
SigningMethod: "v4",
},
},
"fips partition endpoint": {
Region: "fips-partition",
Expected: aws.Endpoint{
PartitionID: "part-id-2",
URL: "https://some-global-thing-fips.amazonaws.com.cn",
SigningRegion: "cn-east-1",
SigningName: "foo",
SigningMethod: "v4",
},
},
"region with unmodeled endpoints": {
Region: "eu-west-1",
Expected: aws.Endpoint{
PartitionID: "part-id-3",
URL: "https://service.eu-west-1.amazonaws.com",
SigningRegion: "eu-west-1",
SigningName: "foo",
SigningMethod: "v4",
},
},
}
for name, tt := range cases {
t.Run(name, func(t *testing.T) {
endpoint, err := testPartitions.ResolveEndpoint(tt.Region, tt.Options)
if err != nil {
t.Errorf("expected no error, got %v", err)
}
if e, a := tt.Expected, endpoint; !reflect.DeepEqual(e, a) {
t.Errorf("expected %v, got %v", e, a)
}
})
}
}
| 234 |
eks-anywhere | aws | Go | package endpoints
import (
"fmt"
"regexp"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/smithy-go/logging"
)
// DefaultKey is a compound map key of a variant and other values.
type DefaultKey struct {
Variant EndpointVariant
ServiceVariant ServiceVariant
}
// EndpointKey is a compound map key of a region and associated variant value.
type EndpointKey struct {
Region string
Variant EndpointVariant
ServiceVariant ServiceVariant
}
// EndpointVariant is a bit field to describe the endpoints attributes.
type EndpointVariant uint64
const (
// FIPSVariant indicates that the endpoint is FIPS capable.
FIPSVariant EndpointVariant = 1 << (64 - 1 - iota)
// DualStackVariant indicates that the endpoint is DualStack capable.
DualStackVariant
)
// ServiceVariant is a bit field to describe the service endpoint attributes.
type ServiceVariant uint64
const (
defaultProtocol = "https"
defaultSigner = "v4"
)
var (
protocolPriority = []string{"https", "http"}
signerPriority = []string{"v4", "s3v4"}
)
// Options provide configuration needed to direct how endpoints are resolved.
type Options struct {
// Logger is a logging implementation that log events should be sent to.
Logger logging.Logger
// LogDeprecated indicates that deprecated endpoints should be logged to the provided logger.
LogDeprecated bool
// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
// over the region name passed to the ResolveEndpoint call.
ResolvedRegion string
// Disable usage of HTTPS (TLS / SSL)
DisableHTTPS bool
// Instruct the resolver to use a service endpoint that supports dual-stack.
// If a service does not have a dual-stack endpoint an error will be returned by the resolver.
UseDualStackEndpoint aws.DualStackEndpointState
// Instruct the resolver to use a service endpoint that supports FIPS.
// If a service does not have a FIPS endpoint an error will be returned by the resolver.
UseFIPSEndpoint aws.FIPSEndpointState
// ServiceVariant is a bitfield of service specified endpoint variant data.
ServiceVariant ServiceVariant
}
// GetEndpointVariant returns the EndpointVariant for the variant associated options.
func (o Options) GetEndpointVariant() (v EndpointVariant) {
if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
v |= DualStackVariant
}
if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled {
v |= FIPSVariant
}
return v
}
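// Worked example (illustrative): when both UseDualStackEndpoint and
// UseFIPSEndpoint are set to their Enabled states, GetEndpointVariant returns
// FIPSVariant|DualStackVariant, which selects endpoint table entries keyed by
// that combined variant.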
// Partitions is a slice of partition.
type Partitions []Partition
// ResolveEndpoint resolves a service endpoint for the given region and options.
func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
if len(ps) == 0 {
return aws.Endpoint{}, fmt.Errorf("no partitions found")
}
if opts.Logger == nil {
opts.Logger = logging.Nop{}
}
if len(opts.ResolvedRegion) > 0 {
region = opts.ResolvedRegion
}
for i := 0; i < len(ps); i++ {
if !ps[i].canResolveEndpoint(region, opts) {
continue
}
return ps[i].ResolveEndpoint(region, opts)
}
// Fall back to the first partition's format when resolving the endpoint.
return ps[0].ResolveEndpoint(region, opts)
}
// Partition is an AWS partition description for a service and its region endpoints.
type Partition struct {
ID string
RegionRegex *regexp.Regexp
PartitionEndpoint string
IsRegionalized bool
Defaults map[DefaultKey]Endpoint
Endpoints Endpoints
}
func (p Partition) canResolveEndpoint(region string, opts Options) bool {
_, ok := p.Endpoints[EndpointKey{
Region: region,
Variant: opts.GetEndpointVariant(),
}]
return ok || p.RegionRegex.MatchString(region)
}
// ResolveEndpoint resolves a service endpoint for the given region and options.
func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
region = p.PartitionEndpoint
}
endpoints := p.Endpoints
variant := options.GetEndpointVariant()
serviceVariant := options.ServiceVariant
defaults := p.Defaults[DefaultKey{
Variant: variant,
ServiceVariant: serviceVariant,
}]
return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options)
}
func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint {
key := EndpointKey{
Region: region,
Variant: variant,
}
if e, ok := endpoints[key]; ok {
return e
}
if !p.IsRegionalized {
return endpoints[EndpointKey{
Region: p.PartitionEndpoint,
Variant: variant,
ServiceVariant: serviceVariant,
}]
}
// Unable to find any matching endpoint; return a
// blank Endpoint that will be used for generic endpoint creation.
return Endpoint{}
}
// Endpoints is a map of service config regions to endpoints.
type Endpoints map[EndpointKey]Endpoint
// CredentialScope is the credential scope of a region and service.
type CredentialScope struct {
Region string
Service string
}
// Endpoint is a service endpoint description.
type Endpoint struct {
// True if the endpoint cannot be resolved for this partition/region/service
Unresolveable aws.Ternary
Hostname string
Protocols []string
CredentialScope CredentialScope
SignatureVersions []string
// Indicates that this endpoint is deprecated.
Deprecated aws.Ternary
}
// IsZero returns whether the endpoint structure is an empty (zero) value.
func (e Endpoint) IsZero() bool {
switch {
case e.Unresolveable != aws.UnknownTernary:
return false
case len(e.Hostname) != 0:
return false
case len(e.Protocols) != 0:
return false
case e.CredentialScope != (CredentialScope{}):
return false
case len(e.SignatureVersions) != 0:
return false
}
return true
}
func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) {
var merged Endpoint
merged.mergeIn(def)
merged.mergeIn(e)
e = merged
if e.IsZero() {
return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region)
}
var u string
if e.Unresolveable != aws.TrueTernary {
// Only attempt to resolve the endpoint if it can be resolved.
hostname := strings.Replace(e.Hostname, "{region}", region, 1)
scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
u = scheme + "://" + hostname
}
signingRegion := e.CredentialScope.Region
if len(signingRegion) == 0 {
signingRegion = region
}
signingName := e.CredentialScope.Service
if e.Deprecated == aws.TrueTernary && options.LogDeprecated {
options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u)
}
return aws.Endpoint{
URL: u,
PartitionID: partition,
SigningRegion: signingRegion,
SigningName: signingName,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
}, nil
}
func (e *Endpoint) mergeIn(other Endpoint) {
if other.Unresolveable != aws.UnknownTernary {
e.Unresolveable = other.Unresolveable
}
if len(other.Hostname) > 0 {
e.Hostname = other.Hostname
}
if len(other.Protocols) > 0 {
e.Protocols = other.Protocols
}
if len(other.CredentialScope.Region) > 0 {
e.CredentialScope.Region = other.CredentialScope.Region
}
if len(other.CredentialScope.Service) > 0 {
e.CredentialScope.Service = other.CredentialScope.Service
}
if len(other.SignatureVersions) > 0 {
e.SignatureVersions = other.SignatureVersions
}
if other.Deprecated != aws.UnknownTernary {
e.Deprecated = other.Deprecated
}
}
func getEndpointScheme(protocols []string, disableHTTPS bool) string {
if disableHTTPS {
return "http"
}
return getByPriority(protocols, protocolPriority, defaultProtocol)
}
func getByPriority(s []string, p []string, def string) string {
if len(s) == 0 {
return def
}
for i := 0; i < len(p); i++ {
for j := 0; j < len(s); j++ {
if s[j] == p[i] {
return s[j]
}
}
}
return s[0]
}
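// Illustrative usage (not part of the module): a minimal sketch of resolving a
// FIPS endpoint through a Partitions table. The partitions value is
// hypothetical, since each service package generates its own table.
//
//	opts := Options{UseFIPSEndpoint: aws.FIPSEndpointStateEnabled}
//	endpoint, err := partitions.ResolveEndpoint("us-west-2", opts)
//	if err != nil {
//		// no FIPS variant is modeled for this region
//	}
//	_ = endpoint.URL // e.g. "https://service-fips.us-west-2.amazonaws.com"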
| 303 |
eks-anywhere | aws | Go | package endpoints
import (
"reflect"
"testing"
)
func TestEndpointResolve(t *testing.T) {
defs := Endpoint{
Hostname: "service.{region}.amazonaws.com",
SignatureVersions: []string{"v4"},
}
e := Endpoint{
Protocols: []string{"http", "https"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Region: "us-west-2",
Service: "service",
},
}
resolved, err := e.resolve("aws", "us-west-2", defs, Options{})
if err != nil {
t.Errorf("expect no error, got %v", err)
}
if e, a := "https://service.us-west-2.amazonaws.com", resolved.URL; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "aws", resolved.PartitionID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "service", resolved.SigningName; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "us-west-2", resolved.SigningRegion; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "v4", resolved.SigningMethod; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestEndpointMergeIn(t *testing.T) {
expected := Endpoint{
Hostname: "other hostname",
Protocols: []string{"http"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Region: "region",
Service: "service",
},
}
actual := Endpoint{}
actual.mergeIn(Endpoint{
Hostname: "other hostname",
Protocols: []string{"http"},
SignatureVersions: []string{"v4"},
CredentialScope: CredentialScope{
Region: "region",
Service: "service",
},
})
if e, a := expected, actual; !reflect.DeepEqual(e, a) {
t.Errorf("expect %v, got %v", e, a)
}
}
| 71 |
eks-anywhere | aws | Go | // Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package endpoints
// goModuleVersion is the tagged release for this module.
const goModuleVersion = "2.4.15"
| 7 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
internalConfig "github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/internal/configsources"
smithy "github.com/aws/smithy-go"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
"time"
)
const ServiceID = "Snowball Device"
const ServiceAPIVersion = "2017-11-27"
// Client provides the API client to make operations call for AWS Snowball Device.
type Client struct {
options Options
}
// New returns an initialized Client based on the functional options. Provide
// additional functional options to further configure the behavior of the client,
// such as changing the client's endpoint or adding custom middleware behavior.
func New(options Options, optFns ...func(*Options)) *Client {
options = options.Copy()
resolveDefaultLogger(&options)
setResolvedDefaultsMode(&options)
resolveRetryer(&options)
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
resolveDefaultEndpointConfiguration(&options)
for _, fn := range optFns {
fn(&options)
}
client := &Client{
options: options,
}
return client
}
type Options struct {
// Set of options to modify how an operation is invoked. These apply to all
// operations invoked for this client. Use functional options on operation call to
// modify this list for per operation behavior.
APIOptions []func(*middleware.Stack) error
// Configures the events that will be sent to the configured logger.
ClientLogMode aws.ClientLogMode
// The credentials object to use when signing requests.
Credentials aws.CredentialsProvider
// The configuration DefaultsMode that the SDK should use when constructing the
// clients initial default settings.
DefaultsMode aws.DefaultsMode
// The endpoint options to be used when attempting to resolve an endpoint.
EndpointOptions EndpointResolverOptions
// The service endpoint resolver.
EndpointResolver EndpointResolver
// Signature Version 4 (SigV4) Signer
HTTPSignerV4 HTTPSignerV4
// The logger writer interface to write logging messages to.
Logger logging.Logger
// The region to send requests to. (Required)
Region string
// RetryMaxAttempts specifies the maximum number of attempts an API client will make
// to call an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client's default retryer or modify a
// per-operation call's retry max attempts. When creating a new API client, this
// member will only be used if the Retryer Options member is nil. This value will
// be ignored if Retryer is not nil. If specified in an operation call's functional
// options with a value that is different than the constructed client's Options,
// the Client's Retryer will be wrapped to use the operation's specific
// RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if the
// Retryer option is not also specified. When creating a new API client, this
// member will only be used if the Retryer Options member is nil. This value will
// be ignored if Retryer is not nil. Per-operation call overrides are currently
// not supported, but may be in the future.
RetryMode aws.RetryMode
// Retryer guides how HTTP requests should be retried in case of recoverable
// failures. When nil, the API client will use a default retryer. The kind of
// default retryer created by the API client can be changed with the RetryMode
// option.
Retryer aws.Retryer
// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
// should not populate this structure programmatically, or rely on the values here
// within your applications.
RuntimeEnvironment aws.RuntimeEnvironment
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto, this stores the value that was
// resolved at that point in time. Per-operation call overrides are currently
// not supported, but may be in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
// implementation if nil.
HTTPClient HTTPClient
}
// WithAPIOptions returns a functional option for setting the Client's APIOptions
// option.
func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
return func(o *Options) {
o.APIOptions = append(o.APIOptions, optFns...)
}
}
// WithEndpointResolver returns a functional option for setting the Client's
// EndpointResolver option.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
return func(o *Options) {
o.EndpointResolver = v
}
}
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
// Copy creates a clone where the APIOptions list is deep copied.
func (o Options) Copy() Options {
to := o
to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
copy(to.APIOptions, o.APIOptions)
return to
}
func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
ctx = middleware.ClearStackValues(ctx)
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
options := c.options.Copy()
for _, fn := range optFns {
fn(&options)
}
finalizeRetryMaxAttemptOptions(&options, *c)
finalizeClientEndpointResolverOptions(&options)
for _, fn := range stackFns {
if err := fn(stack, options); err != nil {
return nil, metadata, err
}
}
for _, fn := range options.APIOptions {
if err := fn(stack); err != nil {
return nil, metadata, err
}
}
handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
result, metadata, err = handler.Handle(ctx, params)
if err != nil {
err = &smithy.OperationError{
ServiceID: ServiceID,
OperationName: opID,
Err: err,
}
}
return result, metadata, err
}
type noSmithyDocumentSerde = smithydocument.NoSerde
func resolveDefaultLogger(o *Options) {
if o.Logger != nil {
return
}
o.Logger = logging.Nop{}
}
func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
return middleware.AddSetLoggerMiddleware(stack, o.Logger)
}
func setResolvedDefaultsMode(o *Options) {
if len(o.resolvedDefaultsMode) > 0 {
return
}
var mode aws.DefaultsMode
mode.SetFromString(string(o.DefaultsMode))
if mode == aws.DefaultsModeAuto {
mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
}
o.resolvedDefaultsMode = mode
}
// NewFromConfig returns a new client from the provided config.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
Region: cfg.Region,
DefaultsMode: cfg.DefaultsMode,
RuntimeEnvironment: cfg.RuntimeEnvironment,
HTTPClient: cfg.HTTPClient,
Credentials: cfg.Credentials,
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSRetryMaxAttempts(cfg, &opts)
resolveAWSRetryMode(cfg, &opts)
resolveAWSEndpointResolver(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
resolveUseFIPSEndpoint(cfg, &opts)
return New(opts, optFns...)
}
func resolveHTTPClient(o *Options) {
var buildable *awshttp.BuildableClient
if o.HTTPClient != nil {
var ok bool
buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
if !ok {
return
}
} else {
buildable = awshttp.NewBuildableClient()
}
modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
if err == nil {
buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
dialer.Timeout = dialerTimeout
}
})
buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
transport.TLSHandshakeTimeout = tlsHandshakeTimeout
}
})
}
o.HTTPClient = buildable
}
func resolveRetryer(o *Options) {
if o.Retryer != nil {
return
}
if len(o.RetryMode) == 0 {
modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
if err == nil {
o.RetryMode = modeConfig.RetryMode
}
}
if len(o.RetryMode) == 0 {
o.RetryMode = aws.RetryModeStandard
}
var standardOptions []func(*retry.StandardOptions)
if v := o.RetryMaxAttempts; v != 0 {
standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
so.MaxAttempts = v
})
}
switch o.RetryMode {
case aws.RetryModeAdaptive:
var adaptiveOptions []func(*retry.AdaptiveModeOptions)
if len(standardOptions) != 0 {
adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
})
}
o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
default:
o.Retryer = retry.NewStandard(standardOptions...)
}
}
func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
if cfg.Retryer == nil {
return
}
o.Retryer = cfg.Retryer()
}
func resolveAWSRetryMode(cfg aws.Config, o *Options) {
if len(cfg.RetryMode) == 0 {
return
}
o.RetryMode = cfg.RetryMode
}
func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
if cfg.RetryMaxAttempts == 0 {
return
}
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
}
func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
}
func addClientUserAgent(stack *middleware.Stack) error {
return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "snowballdevice", goModuleVersion)(stack)
}
func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
CredentialsProvider: o.Credentials,
Signer: o.HTTPSignerV4,
LogSigning: o.ClientLogMode.IsSigning(),
})
return stack.Finalize.Add(mw, middleware.After)
}
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
func resolveHTTPSignerV4(o *Options) {
if o.HTTPSignerV4 != nil {
return
}
o.HTTPSignerV4 = newDefaultV4Signer(*o)
}
func newDefaultV4Signer(o Options) *v4.Signer {
return v4.NewSigner(func(so *v4.SignerOptions) {
so.Logger = o.Logger
so.LogSigning = o.ClientLogMode.IsSigning()
})
}
func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
mo := retry.AddRetryMiddlewaresOptions{
Retryer: o.Retryer,
LogRetryAttempts: o.ClientLogMode.IsRetries(),
}
return retry.AddRetryMiddlewares(stack, mo)
}
// resolves dual-stack endpoint configuration.
func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
if len(cfg.ConfigSources) == 0 {
return nil
}
value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
if err != nil {
return err
}
if found {
o.EndpointOptions.UseDualStackEndpoint = value
}
return nil
}
// resolves FIPS endpoint configuration.
func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
if len(cfg.ConfigSources) == 0 {
return nil
}
value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
if err != nil {
return err
}
if found {
o.EndpointOptions.UseFIPSEndpoint = value
}
return nil
}
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
return awshttp.AddResponseErrorMiddleware(stack)
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
LogRequest: o.ClientLogMode.IsRequest(),
LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
LogResponse: o.ClientLogMode.IsResponse(),
LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
}
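// Illustrative usage (not generated): a minimal sketch of constructing the
// client against a device-local endpoint, written from a caller's perspective.
// The device address is hypothetical, and EndpointResolverFunc is assumed to be
// the adapter emitted alongside this client by the code generator.
//
//	client := snowballdevice.NewFromConfig(cfg, snowballdevice.WithEndpointResolver(
//		snowballdevice.EndpointResolverFunc(func(region string, options snowballdevice.EndpointResolverOptions) (aws.Endpoint, error) {
//			return aws.Endpoint{URL: "https://192.168.1.100:9092"}, nil
//		}),
//	))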
| 434 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io/ioutil"
"net/http"
"strings"
"testing"
)
func TestClient_resolveRetryOptions(t *testing.T) {
nopClient := smithyhttp.ClientDoFunc(func(_ *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: 200,
Header: http.Header{},
Body: ioutil.NopCloser(strings.NewReader("")),
}, nil
})
cases := map[string]struct {
defaultsMode aws.DefaultsMode
retryer aws.Retryer
retryMaxAttempts int
opRetryMaxAttempts *int
retryMode aws.RetryMode
expectClientRetryMode aws.RetryMode
expectClientMaxAttempts int
expectOpMaxAttempts int
}{
"defaults": {
defaultsMode: aws.DefaultsModeStandard,
expectClientRetryMode: aws.RetryModeStandard,
expectClientMaxAttempts: 3,
expectOpMaxAttempts: 3,
},
"custom default retry": {
retryMode: aws.RetryModeAdaptive,
retryMaxAttempts: 10,
expectClientRetryMode: aws.RetryModeAdaptive,
expectClientMaxAttempts: 10,
expectOpMaxAttempts: 10,
},
"custom op max attempts": {
retryMode: aws.RetryModeAdaptive,
retryMaxAttempts: 10,
opRetryMaxAttempts: aws.Int(2),
expectClientRetryMode: aws.RetryModeAdaptive,
expectClientMaxAttempts: 10,
expectOpMaxAttempts: 2,
},
"custom op no change max attempts": {
retryMode: aws.RetryModeAdaptive,
retryMaxAttempts: 10,
opRetryMaxAttempts: aws.Int(10),
expectClientRetryMode: aws.RetryModeAdaptive,
expectClientMaxAttempts: 10,
expectOpMaxAttempts: 10,
},
"custom op 0 max attempts": {
retryMode: aws.RetryModeAdaptive,
retryMaxAttempts: 10,
opRetryMaxAttempts: aws.Int(0),
expectClientRetryMode: aws.RetryModeAdaptive,
expectClientMaxAttempts: 10,
expectOpMaxAttempts: 10,
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
client := NewFromConfig(aws.Config{
DefaultsMode: c.defaultsMode,
Retryer: func() func() aws.Retryer {
if c.retryer == nil {
return nil
}
return func() aws.Retryer { return c.retryer }
}(),
HTTPClient: nopClient,
RetryMaxAttempts: c.retryMaxAttempts,
RetryMode: c.retryMode,
})
if e, a := c.expectClientRetryMode, client.options.RetryMode; e != a {
t.Errorf("expect %v retry mode, got %v", e, a)
}
if e, a := c.expectClientMaxAttempts, client.options.Retryer.MaxAttempts(); e != a {
t.Errorf("expect %v max attempts, got %v", e, a)
}
_, _, err := client.invokeOperation(context.Background(), "mockOperation", struct{}{},
[]func(*Options){
func(o *Options) {
if c.opRetryMaxAttempts == nil {
return
}
o.RetryMaxAttempts = *c.opRetryMaxAttempts
},
},
func(s *middleware.Stack, o Options) error {
s.Initialize.Clear()
s.Serialize.Clear()
s.Build.Clear()
s.Finalize.Clear()
s.Deserialize.Clear()
if e, a := c.expectOpMaxAttempts, o.Retryer.MaxAttempts(); e != a {
t.Errorf("expect %v op max attempts, got %v", e, a)
}
return nil
})
if err != nil {
t.Fatalf("expect no operation error, got %v", err)
}
})
}
}
| 124 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) CheckForUpdates(ctx context.Context, params *CheckForUpdatesInput, optFns ...func(*Options)) (*CheckForUpdatesOutput, error) {
if params == nil {
params = &CheckForUpdatesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CheckForUpdates", params, optFns, c.addOperationCheckForUpdatesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CheckForUpdatesOutput)
out.ResultMetadata = metadata
return out, nil
}
type CheckForUpdatesInput struct {
noSmithyDocumentSerde
}
type CheckForUpdatesOutput struct {
InstalledVersion *string
LatestVersion *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCheckForUpdatesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCheckForUpdates{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCheckForUpdates{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCheckForUpdates(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCheckForUpdates(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "CheckForUpdates",
}
}
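// Editor's sketch (not generated code): minimal usage of CheckForUpdates. It
// assumes cfg is an aws.Config with valid credentials that resolves to a
// reachable Snowball device endpoint, and that the caller imports "fmt" and
// the aws package.
//
//	client := NewFromConfig(cfg)
//	out, err := client.CheckForUpdates(ctx, &CheckForUpdatesInput{})
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("installed=%s latest=%s\n",
//		aws.ToString(out.InstalledVersion), aws.ToString(out.LatestVersion))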
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) ConfigureAutoUpdateStrategy(ctx context.Context, params *ConfigureAutoUpdateStrategyInput, optFns ...func(*Options)) (*ConfigureAutoUpdateStrategyOutput, error) {
if params == nil {
params = &ConfigureAutoUpdateStrategyInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ConfigureAutoUpdateStrategy", params, optFns, c.addOperationConfigureAutoUpdateStrategyMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ConfigureAutoUpdateStrategyOutput)
out.ResultMetadata = metadata
return out, nil
}
type ConfigureAutoUpdateStrategyInput struct {
AutoCheck bool
AutoCheckFrequency *string
AutoDownload bool
AutoDownloadFrequency *string
AutoInstall bool
AutoInstallFrequency *string
AutoReboot bool
noSmithyDocumentSerde
}
type ConfigureAutoUpdateStrategyOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationConfigureAutoUpdateStrategyMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpConfigureAutoUpdateStrategy{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpConfigureAutoUpdateStrategy{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opConfigureAutoUpdateStrategy(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opConfigureAutoUpdateStrategy(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "ConfigureAutoUpdateStrategy",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) CreateAutoStartConfiguration(ctx context.Context, params *CreateAutoStartConfigurationInput, optFns ...func(*Options)) (*CreateAutoStartConfigurationOutput, error) {
if params == nil {
params = &CreateAutoStartConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateAutoStartConfiguration", params, optFns, c.addOperationCreateAutoStartConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateAutoStartConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateAutoStartConfigurationInput struct {
// This member is required.
IpAddressAssignment types.IpAddressAssignment
// This member is required.
LaunchTemplateId *string
// This member is required.
PhysicalConnectorType types.PhysicalConnectorType
LaunchTemplateVersion *string
StaticIpAddressConfiguration *types.StaticIpAddressConfiguration
noSmithyDocumentSerde
}
type CreateAutoStartConfigurationOutput struct {
AutoStartConfiguration *types.AutoStartConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateAutoStartConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateAutoStartConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateAutoStartConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpCreateAutoStartConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAutoStartConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateAutoStartConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "CreateAutoStartConfiguration",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) CreateDirectNetworkInterface(ctx context.Context, params *CreateDirectNetworkInterfaceInput, optFns ...func(*Options)) (*CreateDirectNetworkInterfaceOutput, error) {
if params == nil {
params = &CreateDirectNetworkInterfaceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateDirectNetworkInterface", params, optFns, c.addOperationCreateDirectNetworkInterfaceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateDirectNetworkInterfaceOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateDirectNetworkInterfaceInput struct {
// This member is required.
PhysicalNetworkInterfaceId *string
InstanceId *string
MacAddress *string
VlanId *int32
noSmithyDocumentSerde
}
type CreateDirectNetworkInterfaceOutput struct {
// This member is required.
DirectNetworkInterface *types.DirectNetworkInterface
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateDirectNetworkInterfaceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateDirectNetworkInterface{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateDirectNetworkInterface{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpCreateDirectNetworkInterfaceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDirectNetworkInterface(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateDirectNetworkInterface(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "CreateDirectNetworkInterface",
}
}
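// Editor's sketch (not generated code): the validation middleware registered
// above rejects requests that omit required members before anything is sent
// on the wire. PhysicalNetworkInterfaceId is the assumed trigger here, per
// the input struct; client and ctx are assumptions.
//
//	_, err := client.CreateDirectNetworkInterface(ctx,
//		&CreateDirectNetworkInterfaceInput{}) // PhysicalNetworkInterfaceId unset
//	if err != nil {
//		// err is a client-side validation error; no HTTP request was made.
//	}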
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) CreateTags(ctx context.Context, params *CreateTagsInput, optFns ...func(*Options)) (*CreateTagsOutput, error) {
if params == nil {
params = &CreateTagsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateTags", params, optFns, c.addOperationCreateTagsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateTagsOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateTagsInput struct {
// This member is required.
Tags []types.Tag
noSmithyDocumentSerde
}
type CreateTagsOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateTagsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateTags{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateTags{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpCreateTagsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTags(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateTags(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "CreateTags",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) CreateVirtualNetworkInterface(ctx context.Context, params *CreateVirtualNetworkInterfaceInput, optFns ...func(*Options)) (*CreateVirtualNetworkInterfaceOutput, error) {
if params == nil {
params = &CreateVirtualNetworkInterfaceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateVirtualNetworkInterface", params, optFns, c.addOperationCreateVirtualNetworkInterfaceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateVirtualNetworkInterfaceOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateVirtualNetworkInterfaceInput struct {
// This member is required.
IpAddressAssignment types.IpAddressAssignment
// This member is required.
PhysicalNetworkInterfaceId *string
StaticIpAddressConfiguration *types.StaticIpAddressConfiguration
noSmithyDocumentSerde
}
type CreateVirtualNetworkInterfaceOutput struct {
// This member is required.
VirtualNetworkInterface *types.VirtualNetworkInterface
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateVirtualNetworkInterfaceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateVirtualNetworkInterface{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateVirtualNetworkInterface{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpCreateVirtualNetworkInterfaceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateVirtualNetworkInterface(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateVirtualNetworkInterface(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "CreateVirtualNetworkInterface",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DeleteAutoStartConfiguration(ctx context.Context, params *DeleteAutoStartConfigurationInput, optFns ...func(*Options)) (*DeleteAutoStartConfigurationOutput, error) {
if params == nil {
params = &DeleteAutoStartConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DeleteAutoStartConfiguration", params, optFns, c.addOperationDeleteAutoStartConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DeleteAutoStartConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
type DeleteAutoStartConfigurationInput struct {
// This member is required.
AutoStartConfigurationArn *string
noSmithyDocumentSerde
}
type DeleteAutoStartConfigurationOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDeleteAutoStartConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteAutoStartConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteAutoStartConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteAutoStartConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAutoStartConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDeleteAutoStartConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DeleteAutoStartConfiguration",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DeleteDirectNetworkInterface(ctx context.Context, params *DeleteDirectNetworkInterfaceInput, optFns ...func(*Options)) (*DeleteDirectNetworkInterfaceOutput, error) {
if params == nil {
params = &DeleteDirectNetworkInterfaceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DeleteDirectNetworkInterface", params, optFns, c.addOperationDeleteDirectNetworkInterfaceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DeleteDirectNetworkInterfaceOutput)
out.ResultMetadata = metadata
return out, nil
}
type DeleteDirectNetworkInterfaceInput struct {
// This member is required.
DirectNetworkInterfaceArn *string
noSmithyDocumentSerde
}
type DeleteDirectNetworkInterfaceOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDeleteDirectNetworkInterfaceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteDirectNetworkInterface{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteDirectNetworkInterface{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteDirectNetworkInterfaceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDirectNetworkInterface(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDeleteDirectNetworkInterface(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DeleteDirectNetworkInterface",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DeleteTags(ctx context.Context, params *DeleteTagsInput, optFns ...func(*Options)) (*DeleteTagsOutput, error) {
if params == nil {
params = &DeleteTagsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DeleteTags", params, optFns, c.addOperationDeleteTagsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DeleteTagsOutput)
out.ResultMetadata = metadata
return out, nil
}
type DeleteTagsInput struct {
// This member is required.
Tags []types.Tag
noSmithyDocumentSerde
}
type DeleteTagsOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDeleteTagsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteTags{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteTags{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteTagsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteTags(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDeleteTags(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DeleteTags",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DeleteVirtualNetworkInterface(ctx context.Context, params *DeleteVirtualNetworkInterfaceInput, optFns ...func(*Options)) (*DeleteVirtualNetworkInterfaceOutput, error) {
if params == nil {
params = &DeleteVirtualNetworkInterfaceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DeleteVirtualNetworkInterface", params, optFns, c.addOperationDeleteVirtualNetworkInterfaceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DeleteVirtualNetworkInterfaceOutput)
out.ResultMetadata = metadata
return out, nil
}
type DeleteVirtualNetworkInterfaceInput struct {
// This member is required.
VirtualNetworkInterfaceArn *string
noSmithyDocumentSerde
}
type DeleteVirtualNetworkInterfaceOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDeleteVirtualNetworkInterfaceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteVirtualNetworkInterface{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteVirtualNetworkInterface{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteVirtualNetworkInterfaceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteVirtualNetworkInterface(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDeleteVirtualNetworkInterface(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DeleteVirtualNetworkInterface",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeAutoStartConfigurations(ctx context.Context, params *DescribeAutoStartConfigurationsInput, optFns ...func(*Options)) (*DescribeAutoStartConfigurationsOutput, error) {
if params == nil {
params = &DescribeAutoStartConfigurationsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeAutoStartConfigurations", params, optFns, c.addOperationDescribeAutoStartConfigurationsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeAutoStartConfigurationsOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeAutoStartConfigurationsInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeAutoStartConfigurationsOutput struct {
AutoStartConfigurations []types.AutoStartConfigurationDetails
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeAutoStartConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeAutoStartConfigurations{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeAutoStartConfigurations{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAutoStartConfigurations(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribeAutoStartConfigurationsAPIClient is a client that implements the
// DescribeAutoStartConfigurations operation.
type DescribeAutoStartConfigurationsAPIClient interface {
DescribeAutoStartConfigurations(context.Context, *DescribeAutoStartConfigurationsInput, ...func(*Options)) (*DescribeAutoStartConfigurationsOutput, error)
}
var _ DescribeAutoStartConfigurationsAPIClient = (*Client)(nil)
// DescribeAutoStartConfigurationsPaginatorOptions is the paginator options for
// DescribeAutoStartConfigurations.
type DescribeAutoStartConfigurationsPaginatorOptions struct {
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// DescribeAutoStartConfigurationsPaginator is a paginator for
// DescribeAutoStartConfigurations.
type DescribeAutoStartConfigurationsPaginator struct {
options DescribeAutoStartConfigurationsPaginatorOptions
client DescribeAutoStartConfigurationsAPIClient
params *DescribeAutoStartConfigurationsInput
nextToken *string
firstPage bool
}
// NewDescribeAutoStartConfigurationsPaginator returns a new
// DescribeAutoStartConfigurationsPaginator.
func NewDescribeAutoStartConfigurationsPaginator(client DescribeAutoStartConfigurationsAPIClient, params *DescribeAutoStartConfigurationsInput, optFns ...func(*DescribeAutoStartConfigurationsPaginatorOptions)) *DescribeAutoStartConfigurationsPaginator {
if params == nil {
params = &DescribeAutoStartConfigurationsInput{}
}
options := DescribeAutoStartConfigurationsPaginatorOptions{}
for _, fn := range optFns {
fn(&options)
}
return &DescribeAutoStartConfigurationsPaginator{
options: options,
client: client,
params: params,
firstPage: true,
nextToken: params.NextToken,
}
}
// HasMorePages returns a boolean indicating whether more pages are available.
func (p *DescribeAutoStartConfigurationsPaginator) HasMorePages() bool {
return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
}
// NextPage retrieves the next DescribeAutoStartConfigurations page.
func (p *DescribeAutoStartConfigurationsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeAutoStartConfigurationsOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
result, err := p.client.DescribeAutoStartConfigurations(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken &&
prevToken != nil &&
p.nextToken != nil &&
*prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
func newServiceMetadataMiddleware_opDescribeAutoStartConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeAutoStartConfigurations",
}
}
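// Editor's sketch (not generated code): the standard aws-sdk-go-v2 loop for
// draining this paginator; the same shape applies to the
// DescribeAutoUpdateStrategies paginator below. client and ctx are assumed.
//
//	p := NewDescribeAutoStartConfigurationsPaginator(client,
//		&DescribeAutoStartConfigurationsInput{})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		if err != nil {
//			// handle error; NextPage does not advance on failure
//			break
//		}
//		for _, c := range page.AutoStartConfigurations {
//			_ = c // process one AutoStartConfigurationDetails
//		}
//	}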
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeAutoUpdateStrategies(ctx context.Context, params *DescribeAutoUpdateStrategiesInput, optFns ...func(*Options)) (*DescribeAutoUpdateStrategiesOutput, error) {
if params == nil {
params = &DescribeAutoUpdateStrategiesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeAutoUpdateStrategies", params, optFns, c.addOperationDescribeAutoUpdateStrategiesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeAutoUpdateStrategiesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeAutoUpdateStrategiesInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeAutoUpdateStrategiesOutput struct {
AutoUpdateStrategies []types.AutoUpdateStrategy
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeAutoUpdateStrategiesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeAutoUpdateStrategies{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeAutoUpdateStrategies{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAutoUpdateStrategies(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribeAutoUpdateStrategiesAPIClient is a client that implements the
// DescribeAutoUpdateStrategies operation.
type DescribeAutoUpdateStrategiesAPIClient interface {
DescribeAutoUpdateStrategies(context.Context, *DescribeAutoUpdateStrategiesInput, ...func(*Options)) (*DescribeAutoUpdateStrategiesOutput, error)
}
var _ DescribeAutoUpdateStrategiesAPIClient = (*Client)(nil)
// DescribeAutoUpdateStrategiesPaginatorOptions is the paginator options for
// DescribeAutoUpdateStrategies.
type DescribeAutoUpdateStrategiesPaginatorOptions struct {
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// DescribeAutoUpdateStrategiesPaginator is a paginator for
// DescribeAutoUpdateStrategies.
type DescribeAutoUpdateStrategiesPaginator struct {
options DescribeAutoUpdateStrategiesPaginatorOptions
client DescribeAutoUpdateStrategiesAPIClient
params *DescribeAutoUpdateStrategiesInput
nextToken *string
firstPage bool
}
// NewDescribeAutoUpdateStrategiesPaginator returns a new
// DescribeAutoUpdateStrategiesPaginator.
func NewDescribeAutoUpdateStrategiesPaginator(client DescribeAutoUpdateStrategiesAPIClient, params *DescribeAutoUpdateStrategiesInput, optFns ...func(*DescribeAutoUpdateStrategiesPaginatorOptions)) *DescribeAutoUpdateStrategiesPaginator {
if params == nil {
params = &DescribeAutoUpdateStrategiesInput{}
}
options := DescribeAutoUpdateStrategiesPaginatorOptions{}
for _, fn := range optFns {
fn(&options)
}
return &DescribeAutoUpdateStrategiesPaginator{
options: options,
client: client,
params: params,
firstPage: true,
nextToken: params.NextToken,
}
}
// HasMorePages returns a boolean indicating whether more pages are available.
func (p *DescribeAutoUpdateStrategiesPaginator) HasMorePages() bool {
return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
}
// NextPage retrieves the next DescribeAutoUpdateStrategies page.
func (p *DescribeAutoUpdateStrategiesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeAutoUpdateStrategiesOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
result, err := p.client.DescribeAutoUpdateStrategies(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken &&
prevToken != nil &&
p.nextToken != nil &&
*prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
func newServiceMetadataMiddleware_opDescribeAutoUpdateStrategies(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeAutoUpdateStrategies",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeCluster(ctx context.Context, params *DescribeClusterInput, optFns ...func(*Options)) (*DescribeClusterOutput, error) {
if params == nil {
params = &DescribeClusterInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeCluster", params, optFns, c.addOperationDescribeClusterMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeClusterOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeClusterInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeClusterOutput struct {
// This member is required.
ClusterId *string
Devices []types.Device
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeClusterMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeCluster{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeCluster{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCluster(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeCluster(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeCluster",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeDevice(ctx context.Context, params *DescribeDeviceInput, optFns ...func(*Options)) (*DescribeDeviceOutput, error) {
if params == nil {
params = &DescribeDeviceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeDevice", params, optFns, c.addOperationDescribeDeviceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeDeviceOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeDeviceInput struct {
noSmithyDocumentSerde
}
type DescribeDeviceOutput struct {
// This member is required.
DeviceId *string
// This member is required.
UnlockStatus *types.UnlockStatus
ActiveNetworkInterface *types.NetworkInterface
ClusterAssociation *types.ClusterAssociation
DeviceCapacities []types.Capacity
DeviceType *string
PhysicalNetworkInterfaces []types.PhysicalNetworkInterface
Tags []types.Tag
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeDeviceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeDevice{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeDevice{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDevice(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeDevice(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeDevice",
}
}
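// Editor's sketch (not generated code): reading the richer DescribeDevice
// output; only fields declared in the struct above are touched. client, ctx,
// "fmt", and the aws package are assumed.
//
//	out, err := client.DescribeDevice(ctx, &DescribeDeviceInput{})
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("device %s (type %s) has %d physical network interface(s)\n",
//		aws.ToString(out.DeviceId), aws.ToString(out.DeviceType),
//		len(out.PhysicalNetworkInterfaces))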
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeDeviceSoftware(ctx context.Context, params *DescribeDeviceSoftwareInput, optFns ...func(*Options)) (*DescribeDeviceSoftwareOutput, error) {
if params == nil {
params = &DescribeDeviceSoftwareInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeDeviceSoftware", params, optFns, c.addOperationDescribeDeviceSoftwareMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeDeviceSoftwareOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeDeviceSoftwareInput struct {
noSmithyDocumentSerde
}
type DescribeDeviceSoftwareOutput struct {
InstallState types.InstallState
InstalledVersion *string
InstallingVersion *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeDeviceSoftwareMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeDeviceSoftware{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeDeviceSoftware{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDeviceSoftware(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeDeviceSoftware(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeDeviceSoftware",
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeDirectNetworkInterfaces(ctx context.Context, params *DescribeDirectNetworkInterfacesInput, optFns ...func(*Options)) (*DescribeDirectNetworkInterfacesOutput, error) {
if params == nil {
params = &DescribeDirectNetworkInterfacesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeDirectNetworkInterfaces", params, optFns, c.addOperationDescribeDirectNetworkInterfacesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeDirectNetworkInterfacesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeDirectNetworkInterfacesInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeDirectNetworkInterfacesOutput struct {
// This member is required.
DirectNetworkInterfaces []types.DirectNetworkInterface
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeDirectNetworkInterfacesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeDirectNetworkInterfaces{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeDirectNetworkInterfaces{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDirectNetworkInterfaces(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeDirectNetworkInterfaces(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeDirectNetworkInterfaces",
}
}
| 116 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeFeatures(ctx context.Context, params *DescribeFeaturesInput, optFns ...func(*Options)) (*DescribeFeaturesOutput, error) {
if params == nil {
params = &DescribeFeaturesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeFeatures", params, optFns, c.addOperationDescribeFeaturesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeFeaturesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeFeaturesInput struct {
noSmithyDocumentSerde
}
type DescribeFeaturesOutput struct {
RemoteManagementState types.RemoteManagementState
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeFeaturesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeFeatures{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeFeatures{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeFeatures(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeFeatures(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeFeatures",
}
}
| 110 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribePhysicalNetworkInterfaces(ctx context.Context, params *DescribePhysicalNetworkInterfacesInput, optFns ...func(*Options)) (*DescribePhysicalNetworkInterfacesOutput, error) {
if params == nil {
params = &DescribePhysicalNetworkInterfacesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribePhysicalNetworkInterfaces", params, optFns, c.addOperationDescribePhysicalNetworkInterfacesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribePhysicalNetworkInterfacesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribePhysicalNetworkInterfacesInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribePhysicalNetworkInterfacesOutput struct {
// This member is required.
PhysicalNetworkInterfaces []types.PhysicalNetworkInterface
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribePhysicalNetworkInterfacesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribePhysicalNetworkInterfaces{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribePhysicalNetworkInterfaces{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePhysicalNetworkInterfaces(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribePhysicalNetworkInterfacesAPIClient is a client that implements the
// DescribePhysicalNetworkInterfaces operation.
type DescribePhysicalNetworkInterfacesAPIClient interface {
DescribePhysicalNetworkInterfaces(context.Context, *DescribePhysicalNetworkInterfacesInput, ...func(*Options)) (*DescribePhysicalNetworkInterfacesOutput, error)
}
var _ DescribePhysicalNetworkInterfacesAPIClient = (*Client)(nil)
// DescribePhysicalNetworkInterfacesPaginatorOptions is the paginator options for
// DescribePhysicalNetworkInterfaces.
type DescribePhysicalNetworkInterfacesPaginatorOptions struct {
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// DescribePhysicalNetworkInterfacesPaginator is a paginator for
// DescribePhysicalNetworkInterfaces.
type DescribePhysicalNetworkInterfacesPaginator struct {
options DescribePhysicalNetworkInterfacesPaginatorOptions
client DescribePhysicalNetworkInterfacesAPIClient
params *DescribePhysicalNetworkInterfacesInput
nextToken *string
firstPage bool
}
// NewDescribePhysicalNetworkInterfacesPaginator returns a new
// DescribePhysicalNetworkInterfacesPaginator.
func NewDescribePhysicalNetworkInterfacesPaginator(client DescribePhysicalNetworkInterfacesAPIClient, params *DescribePhysicalNetworkInterfacesInput, optFns ...func(*DescribePhysicalNetworkInterfacesPaginatorOptions)) *DescribePhysicalNetworkInterfacesPaginator {
if params == nil {
params = &DescribePhysicalNetworkInterfacesInput{}
}
options := DescribePhysicalNetworkInterfacesPaginatorOptions{}
for _, fn := range optFns {
fn(&options)
}
return &DescribePhysicalNetworkInterfacesPaginator{
options: options,
client: client,
params: params,
firstPage: true,
nextToken: params.NextToken,
}
}
// HasMorePages returns a boolean indicating whether more pages are available.
func (p *DescribePhysicalNetworkInterfacesPaginator) HasMorePages() bool {
return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
}
// NextPage retrieves the next DescribePhysicalNetworkInterfaces page.
func (p *DescribePhysicalNetworkInterfacesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribePhysicalNetworkInterfacesOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
result, err := p.client.DescribePhysicalNetworkInterfaces(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken &&
prevToken != nil &&
p.nextToken != nil &&
*prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
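// Illustrative usage sketch (not generated code; the configured client "svc"
// and the surrounding error handling are assumptions for demonstration):
// drain the paginator until the service stops returning a next token.
func examplePaginatePhysicalNetworkInterfaces(ctx context.Context, svc *Client) error {
	p := NewDescribePhysicalNetworkInterfacesPaginator(svc, &DescribePhysicalNetworkInterfacesInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		// Each page carries a required slice of types.PhysicalNetworkInterface.
		fmt.Printf("page returned %d physical network interfaces\n", len(page.PhysicalNetworkInterfaces))
	}
	return nil
}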
func newServiceMetadataMiddleware_opDescribePhysicalNetworkInterfaces(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribePhysicalNetworkInterfaces",
}
}
| 198 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
func (c *Client) DescribeReturnShippingLabel(ctx context.Context, params *DescribeReturnShippingLabelInput, optFns ...func(*Options)) (*DescribeReturnShippingLabelOutput, error) {
if params == nil {
params = &DescribeReturnShippingLabelInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeReturnShippingLabel", params, optFns, c.addOperationDescribeReturnShippingLabelMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeReturnShippingLabelOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeReturnShippingLabelInput struct {
noSmithyDocumentSerde
}
type DescribeReturnShippingLabelOutput struct {
ShippingLabelUpdateStatus types.ShippingLabelUpdateStatus
VersionDate *time.Time
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeReturnShippingLabelMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeReturnShippingLabel{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeReturnShippingLabel{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeReturnShippingLabel(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeReturnShippingLabel(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeReturnShippingLabel",
}
}
| 113 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeService(ctx context.Context, params *DescribeServiceInput, optFns ...func(*Options)) (*DescribeServiceOutput, error) {
if params == nil {
params = &DescribeServiceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeService", params, optFns, c.addOperationDescribeServiceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeServiceOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeServiceInput struct {
// This member is required.
ServiceId *string
noSmithyDocumentSerde
}
type DescribeServiceOutput struct {
// This member is required.
ServiceId *string
Endpoints []types.Endpoint
RoleArn *string
ServiceCapacities []types.Capacity
ServiceConfiguration *types.ServiceConfiguration
Status *types.ServiceStatus
Storage *types.ServiceStorage
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeServiceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeService{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeService{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDescribeServiceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeService(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
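// Illustrative usage sketch (assumption, not generated code): ServiceId is a
// required member, so the validation middleware added above fails fast when it
// is nil. "svc" is an already-configured client.
func exampleDescribeService(ctx context.Context, svc *Client, serviceID string) (*DescribeServiceOutput, error) {
	return svc.DescribeService(ctx, &DescribeServiceInput{ServiceId: &serviceID})
}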
func newServiceMetadataMiddleware_opDescribeService(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeService",
}
}
| 131 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeTags(ctx context.Context, params *DescribeTagsInput, optFns ...func(*Options)) (*DescribeTagsOutput, error) {
if params == nil {
params = &DescribeTagsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeTags", params, optFns, c.addOperationDescribeTagsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeTagsOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeTagsInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeTagsOutput struct {
NextToken *string
Tags []types.Tag
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeTagsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeTags{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeTags{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTags(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribeTagsAPIClient is a client that implements the DescribeTags operation.
type DescribeTagsAPIClient interface {
DescribeTags(context.Context, *DescribeTagsInput, ...func(*Options)) (*DescribeTagsOutput, error)
}
var _ DescribeTagsAPIClient = (*Client)(nil)
// DescribeTagsPaginatorOptions is the paginator options for DescribeTags.
type DescribeTagsPaginatorOptions struct {
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// DescribeTagsPaginator is a paginator for DescribeTags.
type DescribeTagsPaginator struct {
options DescribeTagsPaginatorOptions
client DescribeTagsAPIClient
params *DescribeTagsInput
nextToken *string
firstPage bool
}
// NewDescribeTagsPaginator returns a new DescribeTagsPaginator.
func NewDescribeTagsPaginator(client DescribeTagsAPIClient, params *DescribeTagsInput, optFns ...func(*DescribeTagsPaginatorOptions)) *DescribeTagsPaginator {
if params == nil {
params = &DescribeTagsInput{}
}
options := DescribeTagsPaginatorOptions{}
for _, fn := range optFns {
fn(&options)
}
return &DescribeTagsPaginator{
options: options,
client: client,
params: params,
firstPage: true,
nextToken: params.NextToken,
}
}
// HasMorePages returns a boolean indicating whether more pages are available.
func (p *DescribeTagsPaginator) HasMorePages() bool {
return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
}
// NextPage retrieves the next DescribeTags page.
func (p *DescribeTagsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeTagsOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
result, err := p.client.DescribeTags(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken &&
prevToken != nil &&
p.nextToken != nil &&
*prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
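// Illustrative usage sketch (assumption, not generated code): collect every
// tag across pages. StopOnDuplicateToken guards against a service that echoes
// the same pagination token back, which would otherwise loop forever.
func exampleListAllTags(ctx context.Context, svc *Client) ([]types.Tag, error) {
	var tags []types.Tag
	p := NewDescribeTagsPaginator(svc, &DescribeTagsInput{}, func(o *DescribeTagsPaginatorOptions) {
		o.StopOnDuplicateToken = true
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		tags = append(tags, page.Tags...)
	}
	return tags, nil
}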
func newServiceMetadataMiddleware_opDescribeTags(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeTags",
}
}
| 192 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeTimeSources(ctx context.Context, params *DescribeTimeSourcesInput, optFns ...func(*Options)) (*DescribeTimeSourcesOutput, error) {
if params == nil {
params = &DescribeTimeSourcesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeTimeSources", params, optFns, c.addOperationDescribeTimeSourcesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeTimeSourcesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeTimeSourcesInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeTimeSourcesOutput struct {
// This member is required.
Sources []types.TimeSourceStatus
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeTimeSourcesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeTimeSources{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeTimeSources{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTimeSources(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDescribeTimeSources(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeTimeSources",
}
}
| 116 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DescribeVirtualNetworkInterfaces(ctx context.Context, params *DescribeVirtualNetworkInterfacesInput, optFns ...func(*Options)) (*DescribeVirtualNetworkInterfacesOutput, error) {
if params == nil {
params = &DescribeVirtualNetworkInterfacesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeVirtualNetworkInterfaces", params, optFns, c.addOperationDescribeVirtualNetworkInterfacesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeVirtualNetworkInterfacesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DescribeVirtualNetworkInterfacesInput struct {
NextToken *string
noSmithyDocumentSerde
}
type DescribeVirtualNetworkInterfacesOutput struct {
// This member is required.
VirtualNetworkInterfaces []types.VirtualNetworkInterface
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeVirtualNetworkInterfacesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeVirtualNetworkInterfaces{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeVirtualNetworkInterfaces{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVirtualNetworkInterfaces(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribeVirtualNetworkInterfacesAPIClient is a client that implements the
// DescribeVirtualNetworkInterfaces operation.
type DescribeVirtualNetworkInterfacesAPIClient interface {
DescribeVirtualNetworkInterfaces(context.Context, *DescribeVirtualNetworkInterfacesInput, ...func(*Options)) (*DescribeVirtualNetworkInterfacesOutput, error)
}
var _ DescribeVirtualNetworkInterfacesAPIClient = (*Client)(nil)
// DescribeVirtualNetworkInterfacesPaginatorOptions is the paginator options for
// DescribeVirtualNetworkInterfaces.
type DescribeVirtualNetworkInterfacesPaginatorOptions struct {
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// DescribeVirtualNetworkInterfacesPaginator is a paginator for
// DescribeVirtualNetworkInterfaces.
type DescribeVirtualNetworkInterfacesPaginator struct {
options DescribeVirtualNetworkInterfacesPaginatorOptions
client DescribeVirtualNetworkInterfacesAPIClient
params *DescribeVirtualNetworkInterfacesInput
nextToken *string
firstPage bool
}
// NewDescribeVirtualNetworkInterfacesPaginator returns a new
// DescribeVirtualNetworkInterfacesPaginator.
func NewDescribeVirtualNetworkInterfacesPaginator(client DescribeVirtualNetworkInterfacesAPIClient, params *DescribeVirtualNetworkInterfacesInput, optFns ...func(*DescribeVirtualNetworkInterfacesPaginatorOptions)) *DescribeVirtualNetworkInterfacesPaginator {
if params == nil {
params = &DescribeVirtualNetworkInterfacesInput{}
}
options := DescribeVirtualNetworkInterfacesPaginatorOptions{}
for _, fn := range optFns {
fn(&options)
}
return &DescribeVirtualNetworkInterfacesPaginator{
options: options,
client: client,
params: params,
firstPage: true,
nextToken: params.NextToken,
}
}
// HasMorePages returns a boolean indicating whether more pages are available.
func (p *DescribeVirtualNetworkInterfacesPaginator) HasMorePages() bool {
return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
}
// NextPage retrieves the next DescribeVirtualNetworkInterfaces page.
func (p *DescribeVirtualNetworkInterfacesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeVirtualNetworkInterfacesOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
result, err := p.client.DescribeVirtualNetworkInterfaces(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken &&
prevToken != nil &&
p.nextToken != nil &&
*prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
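// Illustrative usage sketch (assumption, not generated code): the virtual
// interface paginator behaves identically to the other paginators in this
// package; here the results are simply counted across pages.
func exampleCountVirtualNetworkInterfaces(ctx context.Context, svc *Client) (int, error) {
	total := 0
	p := NewDescribeVirtualNetworkInterfacesPaginator(svc, &DescribeVirtualNetworkInterfacesInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return 0, err
		}
		total += len(page.VirtualNetworkInterfaces)
	}
	return total, nil
}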
func newServiceMetadataMiddleware_opDescribeVirtualNetworkInterfaces(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DescribeVirtualNetworkInterfaces",
}
}
| 198 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) DownloadUpdates(ctx context.Context, params *DownloadUpdatesInput, optFns ...func(*Options)) (*DownloadUpdatesOutput, error) {
if params == nil {
params = &DownloadUpdatesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DownloadUpdates", params, optFns, c.addOperationDownloadUpdatesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DownloadUpdatesOutput)
out.ResultMetadata = metadata
return out, nil
}
type DownloadUpdatesInput struct {
noSmithyDocumentSerde
}
type DownloadUpdatesOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDownloadUpdatesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDownloadUpdates{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDownloadUpdates{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDownloadUpdates(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDownloadUpdates(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "DownloadUpdates",
}
}
| 107 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) GetCertificate(ctx context.Context, params *GetCertificateInput, optFns ...func(*Options)) (*GetCertificateOutput, error) {
if params == nil {
params = &GetCertificateInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetCertificate", params, optFns, c.addOperationGetCertificateMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetCertificateOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetCertificateInput struct {
// This member is required.
CertificateArn *string
noSmithyDocumentSerde
}
type GetCertificateOutput struct {
// This member is required.
Certificate *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetCertificateMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCertificate{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCertificate{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpGetCertificateValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCertificate(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetCertificate(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "GetCertificate",
}
}
| 118 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) GetNotificationConfiguration(ctx context.Context, params *GetNotificationConfigurationInput, optFns ...func(*Options)) (*GetNotificationConfigurationOutput, error) {
if params == nil {
params = &GetNotificationConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetNotificationConfiguration", params, optFns, c.addOperationGetNotificationConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetNotificationConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetNotificationConfigurationInput struct {
// This member is required.
ServiceId *string
noSmithyDocumentSerde
}
type GetNotificationConfigurationOutput struct {
BrokerEndPoint *string
Enabled *bool
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetNotificationConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetNotificationConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpGetNotificationConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetNotificationConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "GetNotificationConfiguration",
}
}
| 118 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
func (c *Client) GetReturnShippingLabel(ctx context.Context, params *GetReturnShippingLabelInput, optFns ...func(*Options)) (*GetReturnShippingLabelOutput, error) {
if params == nil {
params = &GetReturnShippingLabelInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetReturnShippingLabel", params, optFns, c.addOperationGetReturnShippingLabelMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetReturnShippingLabelOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetReturnShippingLabelInput struct {
noSmithyDocumentSerde
}
type GetReturnShippingLabelOutput struct {
CurrentVersionDate *time.Time
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetReturnShippingLabelMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetReturnShippingLabel{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetReturnShippingLabel{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetReturnShippingLabel(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetReturnShippingLabel(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "GetReturnShippingLabel",
}
}
| 110 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) GetSecretAccessKey(ctx context.Context, params *GetSecretAccessKeyInput, optFns ...func(*Options)) (*GetSecretAccessKeyOutput, error) {
if params == nil {
params = &GetSecretAccessKeyInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetSecretAccessKey", params, optFns, c.addOperationGetSecretAccessKeyMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetSecretAccessKeyOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetSecretAccessKeyInput struct {
// This member is required.
AccessKeyId *string
noSmithyDocumentSerde
}
type GetSecretAccessKeyOutput struct {
// This member is required.
AccessKeyId *string
// This member is required.
SecretAccessKey *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetSecretAccessKeyMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetSecretAccessKey{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetSecretAccessKey{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpGetSecretAccessKeyValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSecretAccessKey(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
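// Illustrative usage sketch (assumption, not generated code): AccessKeyId is
// required, so the validation middleware added above rejects a nil value
// before the request is signed or sent. "svc" is an already-configured client.
func exampleGetSecretAccessKey(ctx context.Context, svc *Client, accessKeyID string) (string, error) {
	out, err := svc.GetSecretAccessKey(ctx, &GetSecretAccessKeyInput{
		AccessKeyId: &accessKeyID,
	})
	if err != nil {
		return "", err
	}
	return *out.SecretAccessKey, nil
}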
func newServiceMetadataMiddleware_opGetSecretAccessKey(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "GetSecretAccessKey",
}
}
| 121 |
eks-anywhere | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package snowballdevice
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) InstallUpdates(ctx context.Context, params *InstallUpdatesInput, optFns ...func(*Options)) (*InstallUpdatesOutput, error) {
if params == nil {
params = &InstallUpdatesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "InstallUpdates", params, optFns, c.addOperationInstallUpdatesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*InstallUpdatesOutput)
out.ResultMetadata = metadata
return out, nil
}
type InstallUpdatesInput struct {
noSmithyDocumentSerde
}
type InstallUpdatesOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationInstallUpdatesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpInstallUpdates{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpInstallUpdates{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInstallUpdates(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opInstallUpdates(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "snowballdevice",
OperationName: "InstallUpdates",
}
}
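// Illustrative workflow sketch (assumption, not generated code): device
// software updates are typically staged with DownloadUpdates and then applied
// with InstallUpdates; both operations take empty inputs in this API.
func exampleApplyDeviceUpdates(ctx context.Context, svc *Client) error {
	if _, err := svc.DownloadUpdates(ctx, &DownloadUpdatesInput{}); err != nil {
		return err
	}
	_, err := svc.InstallUpdates(ctx, &InstallUpdatesInput{})
	return err
}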
| 107 |