Dataset columns:
  repo_name              string (lengths 1 to 52)
  repo_creator           string (6 classes)
  programming_language   string (4 classes)
  code                   string (lengths 0 to 9.68M)
  num_lines              int64 (1 to 234k)
eks-distro-build-tooling
aws
Go
/* Package logger implements a simple way to initialize a global logger and access it through a logr.Logger interface. Message: All messages should start with a capital letter. Log level: The loggers only support verbosity levels (V-levels) instead of semantic levels. Level zero, the default, matters most. Increasing levels matter less and less. - 0: You always want to see this. - 1: Common logging that you don't want to show by default. - 2: Useful steady state information about the operation and important log messages that may correlate to significant changes in the system. - 3: Extended information about changes. Somewhat useful information for the user that is not important enough for level 2. - 4: Debugging information. Starting from this level, all logs are oriented to developers and troubleshooting. - 5: Traces. Information to follow the code path. - 6: Information about interaction with external resources. External binary commands, API calls. - 7: Extra information passed to external systems. Configuration files, Kubernetes manifests, etc. - 8: Truncated output/responses from external binaries and clients. - 9: Full output/responses from external binaries and clients. Logging WithValues: Logging WithValues should be preferred over embedding values into log messages because it allows machine readability. Variable names should start with a capital letter. Logging WithNames: Logging WithNames should be used carefully. Please consider that practices like prefixing the logs with something indicating which part of the code is generating the log entry might be useful for developers, but it can create confusion for end users because it increases the verbosity without providing information the user can understand or benefit from. Logging errors: Proper error management should always be preferred over the use of log.Error. */ package logger
43
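The doc comment above describes logging conventions rather than the API itself; as a rough sketch of what following those conventions could look like, assuming only the package-level InitZap and V helpers that appear elsewhere in this section (the messages and key/value pairs below are made up):

package main

import (
	"github.com/aws/eks-distro-build-tooling/tools/eksDistroBuildToolingOpsTools/pkg/logger"
)

func main() {
	// Initialize the global logger at verbosity 2 (see the V-level table above).
	if err := logger.InitZap(2); err != nil {
		panic(err)
	}

	// Level 0: always shown; the message starts with a capital letter.
	logger.V(0).Info("Starting operation")

	// Prefer WithValues-style key/value pairs over embedding values in the
	// message so the output stays machine readable; keys start with a capital letter.
	logger.V(2).Info("Steady state information", "Cluster", "example-cluster")

	// Level 6: interaction with external resources (binary commands, API calls).
	logger.V(6).Info("Calling external binary", "Command", "git rev-parse")
}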
eks-distro-build-tooling
aws
Go
package logger import ( "fmt" "time" "github.com/go-logr/zapr" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // InitZap creates a zap logger with the provided verbosity level // and sets it as the package logger. // 0 is the least verbose and 10 the most verbose. // The package logger can only be initialized once, so subsequent calls to this method // won't have any effect. func InitZap(level int, opts ...LoggerOpt) error { cfg := zap.NewDevelopmentConfig() cfg.Level = zap.NewAtomicLevelAt(zapcore.Level(-1 * level)) cfg.EncoderConfig.EncodeLevel = nil cfg.EncoderConfig.EncodeTime = NullTimeEncoder cfg.DisableCaller = true cfg.DisableStacktrace = true // Only enabling this at level 4 because that's when // our debugging levels start. Ref: doc.go if level >= 4 { cfg.EncoderConfig.EncodeLevel = VLevelEncoder cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder } zapLog, err := cfg.Build() if err != nil { return fmt.Errorf("creating zap logger: %v", err) } logr := zapr.NewLogger(zapLog) for _, opt := range opts { opt(&logr) } set(logr) l.V(4).Info("Logger init completed", "vlevel", level) return nil } // VLevelEncoder serializes a Level to V + v-level number. func VLevelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { enc.AppendString(fmt.Sprintf("V%d", -1*int(l))) } // NullTimeEncoder skips time serialization. func NullTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {}
54
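The `-1 * level` conversion in InitZap reflects how zapr maps logr V-levels onto negative zap levels. Below is a minimal standalone sketch of just that mapping, using only the zap and zapr calls already imported by the file above (the verbosity value and messages are illustrative):

package main

import (
	"fmt"

	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// zapr logs a V(n) message at zapcore.Level(-n), so enabling verbosity 3
	// means lowering the atomic level to -3: V(0) through V(3) pass, V(4) does not.
	verbosity := 3
	cfg := zap.NewDevelopmentConfig()
	cfg.Level = zap.NewAtomicLevelAt(zapcore.Level(-1 * verbosity))

	zapLog, err := cfg.Build()
	if err != nil {
		panic(fmt.Errorf("creating zap logger: %v", err))
	}
	log := zapr.NewLogger(zapLog)

	log.V(3).Info("Visible: within the configured verbosity")
	log.V(4).Info("Suppressed: above the configured verbosity")
}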
eks-distro-build-tooling
aws
Go
package retrier import ( "math" "time" "github.com/aws/eks-distro-build-tooling/tools/eksDistroBuildToolingOpsTools/pkg/logger" ) type Retrier struct { retryPolicy RetryPolicy timeout time.Duration backoffFactor *float32 } type ( // RetryPolicy allows customizing the retrying logic. The boolean retry indicates if a new retry // should be performed and the wait duration indicates the wait time before the next retry RetryPolicy func(totalRetries int, err error) (retry bool, wait time.Duration) RetrierOpt func(*Retrier) ) // New creates a new retrier with a global timeout (max time allowed for the whole execution) // The default retry policy is to always retry with no wait time in between retries func New(timeout time.Duration, opts ...RetrierOpt) *Retrier { r := &Retrier{ timeout: timeout, retryPolicy: zeroWaitPolicy, } for _, o := range opts { o(r) } return r } // NewWithMaxRetries creates a new retrier with no global timeout and a max retries policy func NewWithMaxRetries(maxRetries int, backOffPeriod time.Duration) *Retrier { // this value is roughly 292 years, so in practice there is no timeout return New(time.Duration(math.MaxInt64), WithMaxRetries(maxRetries, backOffPeriod)) } // WithMaxRetries sets a retry policy that will retry up to maxRetries times // with a wait time between retries of backOffPeriod func WithMaxRetries(maxRetries int, backOffPeriod time.Duration) RetrierOpt { return func(r *Retrier) { r.retryPolicy = maxRetriesPolicy(maxRetries, backOffPeriod) } } func WithBackoffFactor(factor float32) RetrierOpt { return func(r *Retrier) { r.backoffFactor = &factor } } func WithRetryPolicy(policy RetryPolicy) RetrierOpt { return func(r *Retrier) { r.retryPolicy = policy } } // Retry runs the fn function until it either completes successfully (no error), // the set timeout is reached, or the retry policy aborts the execution func (r *Retrier) Retry(fn func() error) error { start := time.Now() retries := 0 var err error for retry := true; retry; retry = time.Since(start) < r.timeout { err = fn() retries += 1 if err == nil { logger.V(5).Info("Retry execution successful", "retries", retries, "duration", time.Since(start)) return nil } logger.V(5).Info("Error happened during retry", "error", err, "retries", retries) retry, wait := r.retryPolicy(retries, err) if !retry { logger.V(5).Info("Execution aborted by retry policy") return err } if r.backoffFactor != nil { wait = wait * time.Duration(*r.backoffFactor*float32(retries)) } logger.V(5).Info("Sleeping before next retry", "time", wait) time.Sleep(wait) } logger.V(5).Info("Timeout reached. Returning error", "retries", retries, "duration", time.Since(start), "error", err) return err } // Retry runs fn with a MaxRetriesPolicy func Retry(maxRetries int, backOffPeriod time.Duration, fn func() error) error { r := NewWithMaxRetries(maxRetries, backOffPeriod) return r.Retry(fn) } func zeroWaitPolicy(_ int, _ error) (retry bool, wait time.Duration) { return true, 0 } func maxRetriesPolicy(maxRetries int, backOffPeriod time.Duration) RetryPolicy { return func(totalRetries int, _ error) (retry bool, wait time.Duration) { return totalRetries < maxRetries, backOffPeriod } }
110
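As a usage sketch for the retrier API above (doSomethingFlaky and errPermanent are hypothetical placeholders for real work and a real error):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/aws/eks-distro-build-tooling/tools/eksDistroBuildToolingOpsTools/pkg/retrier"
)

// errPermanent and doSomethingFlaky stand in for a real error and real work.
var errPermanent = errors.New("permanent failure")

func doSomethingFlaky() error {
	return errPermanent
}

func main() {
	// Simple form: up to 5 attempts with 2 seconds between attempts and,
	// per NewWithMaxRetries, effectively no global timeout.
	err := retrier.Retry(5, 2*time.Second, doSomethingFlaky)
	fmt.Println("max-retries result:", err)

	// Custom policy: 30-second global timeout, stop as soon as the error is
	// permanent, otherwise wait one second before the next attempt.
	r := retrier.New(30*time.Second, retrier.WithRetryPolicy(
		func(totalRetries int, err error) (bool, time.Duration) {
			if errors.Is(err, errPermanent) {
				return false, 0
			}
			return true, time.Second
		},
	))
	fmt.Println("custom-policy result:", r.Retry(doSomethingFlaky))
}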
eks-distro-prow-jobs
aws
Go
package main import ( _ "embed" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "github.com/aws/eks-distro-prow-jobs/templater/jobs" "github.com/aws/eks-distro-prow-jobs/templater/jobs/types" "github.com/aws/eks-distro-prow-jobs/templater/jobs/utils" ) var jobsFolder = "jobs" var orgsSupported = []string{"aws"} var jobTypes = []string{"periodic", "postsubmit", "presubmit"} //go:embed templates/presubmits.yaml var presubmitTemplate string //go:embed templates/postsubmits.yaml var postsubmitTemplate string //go:embed templates/periodics.yaml var periodicTemplate string //go:embed templates/warning.txt var editWarning string //go:generate cp ../BUILDER_BASE_TAG_FILE ./BUILDER_BASE_TAG_FILE //go:embed BUILDER_BASE_TAG_FILE var builderBaseTag string var buildkitImageTag = "v0.10.5-rootless" func main() { jobsFolderPath, err := getJobsFolderPath() if err != nil { fmt.Printf("Error getting jobs folder path: %v", err) os.Exit(1) } for _, org := range orgsSupported { if err = os.RemoveAll(filepath.Join(jobsFolderPath, org)); err != nil { fmt.Printf("Error removing jobs folder path: %v", err) os.Exit(1) } } for _, jobType := range jobTypes { jobList, err := jobs.GetJobList(jobType) if err != nil { fmt.Printf("Error getting job list: %v\n", err) os.Exit(1) } template, err := useTemplate(jobType) if err != nil { fmt.Printf("Error getting job list: %v\n", err) os.Exit(1) } for repoName, jobConfigs := range jobList { for fileName, jobConfig := range jobConfigs { envVars := jobConfig.EnvVars if jobConfig.UseDockerBuildX { envVars = append(envVars, &types.EnvVar{Name: "BUILDKITD_IMAGE", Value: "moby/buildkit:" + buildkitImageTag}) envVars = append(envVars, &types.EnvVar{Name: "USE_BUILDX", Value: "true"}) } templateBuilderBaseTag := builderBaseTag if jobConfig.UseMinimalBuilderBase { templateBuilderBaseTag = strings.Replace(builderBaseTag, "standard", "minimal", 1) } branches := jobConfig.Branches if jobType == "postsubmit" && len(branches) == 0 { branches = append(branches, "^main$") } cluster, bucket, serviceAccountName := clusterDetails(jobType, jobConfig.Cluster, jobConfig.ServiceAccountName) data := map[string]interface{}{ "architecture": jobConfig.Architecture, "repoName": repoName, "prowjobName": jobConfig.JobName, "runIfChanged": jobConfig.RunIfChanged, "skipIfOnlyChanged": jobConfig.SkipIfOnlyChanged, "branches": branches, "cronExpression": jobConfig.CronExpression, "maxConcurrency": jobConfig.MaxConcurrency, "timeout": jobConfig.Timeout, "extraRefs": jobConfig.ExtraRefs, "imageBuild": jobConfig.ImageBuild, "useDockerBuildX": jobConfig.UseDockerBuildX, "prCreation": jobConfig.PRCreation, "runtimeImage": jobConfig.RuntimeImage, "localRegistry": jobConfig.LocalRegistry, "serviceAccountName": serviceAccountName, "command": strings.Join(jobConfig.Commands, "\n&&\n"), "builderBaseTag": templateBuilderBaseTag, "buildkitImageTag": buildkitImageTag, "resources": jobConfig.Resources, "envVars": envVars, "volumes": jobConfig.Volumes, "volumeMounts": jobConfig.VolumeMounts, "editWarning": editWarning, "automountServiceAccountToken": jobConfig.AutomountServiceAccountToken, "cluster": cluster, "bucket": bucket, "projectPath": jobConfig.ProjectPath, "diskUsage": true, } err := GenerateProwjob(fileName, template, data) if err != nil { fmt.Printf("Error generating Prowjob %s: %v\n", fileName, err) os.Exit(1) } } } } } func GenerateProwjob(prowjobFileName, templateContent string, data map[string]interface{}) error { bytes, err := utils.ExecuteTemplate(templateContent, data) if err != nil { return 
fmt.Errorf("error executing template: %v", err) } jobsFolderPath, err := getJobsFolderPath() if err != nil { return fmt.Errorf("error getting jobs folder path: %v", err) } prowjobPath := filepath.Join(jobsFolderPath, data["repoName"].(string), prowjobFileName) if err = os.MkdirAll(filepath.Dir(prowjobPath), 0o755); err != nil { return fmt.Errorf("error creating Prowjob directory: %v", err) } if err = ioutil.WriteFile(prowjobPath, bytes, 0o644); err != nil { return fmt.Errorf("error writing to path %s: %v", prowjobPath, err) } return nil } func getJobsFolderPath() (string, error) { gitRootOutput, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() if err != nil { return "", fmt.Errorf("error running the git command: %v", err) } gitRoot := strings.Fields(string(gitRootOutput))[0] return filepath.Join(gitRoot, jobsFolder), nil } func useTemplate(jobType string) (string, error) { switch jobType { case "periodic": return periodicTemplate, nil case "postsubmit": return postsubmitTemplate, nil case "presubmit": return presubmitTemplate, nil default: return "", fmt.Errorf("Unsupported job type: %s", jobType) } } func clusterDetails(jobType string, cluster string, serviceAccountName string) (string, string, string) { if cluster == "prow-postsubmits-cluster" { jobType = "postsubmit" } cluster = "prow-presubmits-cluster" bucket := "s3://prowpresubmitsdataclusterstack-prowbucket7c73355c-vfwwxd2eb4gp" if jobType == "postsubmit" || jobType == "periodic" { cluster = "prow-postsubmits-cluster" bucket = "s3://prowdataclusterstack-316434458-prowbucket7c73355c-1n9f9v93wpjcm" } if len(serviceAccountName) == 0 { serviceAccountName = jobType + "s-build-account" } return cluster, bucket, serviceAccountName }
193
eks-distro-prow-jobs
aws
Go
package jobs import ( "fmt" "github.com/aws/eks-distro-prow-jobs/templater/jobs/types" "github.com/aws/eks-distro-prow-jobs/templater/jobs/utils" ) func GetJobList(jobType string) (map[string]map[string]types.JobConfig, error) { switch jobType { case "periodic": repos := []string{"eks-distro", "eks-distro-build-tooling"} periodicsList, err := utils.GetJobsByType(repos, "periodic") if err != nil { return nil, fmt.Errorf("error getting periodic list:%v", err) } return periodicsList, nil case "postsubmit": repos := []string{"eks-distro", "eks-distro-build-tooling"} postsubmitsList, err := utils.GetJobsByType(repos, "postsubmit") if err != nil { return nil, fmt.Errorf("error getting postsubmits list:%v", err) } return postsubmitsList, nil case "presubmit": repos := []string{"eks-distro", "eks-distro-build-tooling", "eks-distro-prow-jobs"} presubmitsList, err := utils.GetJobsByType(repos, "presubmit") if err != nil { return nil, fmt.Errorf("error getting presubmits list:%v", err) } return presubmitsList, nil default: return nil, fmt.Errorf("Unsupported job type: %s", jobType) } }
37
eks-distro-prow-jobs
aws
Go
package types type ExtraRef struct { BaseRef string `json:"baseRef,omitempty"` Org string `json:"org,omitempty"` Repo string `json:"repo,omitempty"` } type EnvVar struct { Name string `json:"name,omitempty"` Value string `json:"value,omitempty"` } type Resources struct { Requests *ResourceConfig `json:"requests,omitempty"` Limits *ResourceConfig `json:"limits,omitempty"` } type ResourceConfig struct { CPU string `json:"cpu,omitempty"` Memory string `json:"memory,omitempty"` } type HostPath struct { Path string `json:"path,omitempty"` Type string `json:"type,omitempty"` } type Secret struct { Name string `json:"name,omitempty"` DefaultMode int `json:"defaultMode,omitempty"` } type Volume struct { Name string `json:"name,omitempty"` VolumeType string `json:"volumeType,omitempty"` HostPath *HostPath `json:"hostPath,omitempty"` Secret *Secret `json:"secret,omitempty"` } type VolumeMount struct { Name string `json:"name,omitempty"` MountPath string `json:"mountPath,omitempty"` ReadOnly bool `json:"readOnly,omitempty"` } type JobConfig struct { Architecture string `json:"architecture,omitempty"` JobName string `json:"jobName,omitempty"` RunIfChanged string `json:"runIfChanged,omitempty"` SkipIfOnlyChanged string `json:"skipIfOnlyChanged,omitempty"` Branches []string `json:"branches,omitempty"` MaxConcurrency int `json:"maxConcurrency,omitempty"` CronExpression string `json:"cronExpression,omitempty"` Timeout string `json:"timeout,omitempty"` ImageBuild bool `json:"imageBuild,omitempty"` UseDockerBuildX bool `json:"useDockerBuildX,omitempty"` UseMinimalBuilderBase bool `json:"useMinimalBuilderBase,omitempty"` PRCreation bool `json:"prCreation,omitempty"` RuntimeImage string `json:"runtimeImage,omitempty"` LocalRegistry bool `json:"localRegistry,omitempty"` ExtraRefs []*ExtraRef `json:"extraRefs,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty"` EnvVars []*EnvVar `json:"envVars,omitempty"` Commands []string `json:"commands,omitempty"` Resources *Resources `json:"resources,omitempty"` VolumeMounts []*VolumeMount `json:"volumeMounts,omitempty"` Volumes []*Volume `json:"volumes,omitempty"` AutomountServiceAccountToken string `json:"automountServiceAccountToken,omitempty"` Cluster string `json:"cluster,omitempty"` Bucket string `json:"bucket,omitempty"` ProjectPath string `json:"projectPath,omitempty"` }
74
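The json tags above are what the job YAML files use as keys, since the utils package later in this section unmarshals them with ghodss/yaml (which routes YAML through the json tags). A small sketch of that round trip, with made-up job values:

package main

import (
	"fmt"

	"github.com/ghodss/yaml"

	"github.com/aws/eks-distro-prow-jobs/templater/jobs/types"
)

func main() {
	// Hypothetical job definition; keys match the json tags on types.JobConfig.
	raw := []byte(`
jobName: example-presubmit
runIfChanged: "^projects/example/"
commands:
  - make build
resources:
  requests:
    cpu: "2"
    memory: 4Gi
`)

	var job types.JobConfig
	if err := yaml.Unmarshal(raw, &job); err != nil {
		panic(err)
	}
	fmt.Println(job.JobName, job.Commands, job.Resources.Requests.CPU)
}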
eks-distro-prow-jobs
aws
Go
package utils import ( "bytes" "fmt" "io/ioutil" "path/filepath" "strings" "text/template" "github.com/ghodss/yaml" "github.com/aws/eks-distro-prow-jobs/templater/jobs/types" ) var releaseBranches = []string{ "1-23", "1-24", "1-25", "1-26", "1-27", } var golangVersions = []string{ "1-18", "1-19", "1-20", } var pythonVersions = []string{ "3-7", "3-9", } var alVersions = []string{ "2", "2023", } func GetJobsByType(repos []string, jobType string) (map[string]map[string]types.JobConfig, error) { jobsListByType := map[string]map[string]types.JobConfig{} for _, repo := range repos { jobDir := filepath.Join("jobs", jobType, repo) jobList, err := UnmarshalJobs(jobDir) if err != nil { return nil, fmt.Errorf("error reading job directory %s: %v", jobDir, err) } jobsListByType[fmt.Sprintf("aws/%s", repo)] = jobList } return jobsListByType, nil } func AppendMap(current map[string]interface{}, new map[string]interface{}) map[string]interface{} { newMap := map[string]interface{}{} for k, v := range current { newMap[k] = v } for k, v := range new { newMap[k] = v } return newMap } func AddALVersion(fileName string, data map[string]interface{}) map[string]map[string]interface{} { jobList := map[string]map[string]interface{}{} if !strings.Contains(fileName, "al-X") { return jobList } for _, version := range alVersions { alVersionFileName := strings.ReplaceAll(fileName, "al-X", "al-"+version) jobList[alVersionFileName] = AppendMap(data, map[string]interface{}{ "alVersion": version, }) } return jobList } func AddGolangVersion(fileName string, data map[string]interface{}) map[string]map[string]interface{} { jobList := map[string]map[string]interface{}{} if !strings.Contains(fileName, "golang-1-X") { return jobList } for _, version := range golangVersions { golangVersionFileName := strings.ReplaceAll(fileName, "golang-1-X", "golang-"+version) goVersion := strings.Replace(version, "-", ".", 1) jobList[golangVersionFileName] = AppendMap(data, map[string]interface{}{ "jobGoVersion": version, "golangVersion": goVersion, }) } return jobList } func AddPythonVersion(fileName string, data map[string]interface{}) map[string]map[string]interface{} { jobList := map[string]map[string]interface{}{} if !strings.Contains(fileName, "python-3-X") { return jobList } for _, version := range pythonVersions { pythonVersionFileName := strings.ReplaceAll(fileName, "python-3-X", "python-"+version) pythonVersion := strings.Replace(version, "-", ".", 1) jobList[pythonVersionFileName] = AppendMap(data, map[string]interface{}{ "jobPythonVersion": version, "pythonVersion": pythonVersion, }) } return jobList } func AddReleaseBranch(fileName string, data map[string]interface{}) map[string]map[string]interface{} { jobList := map[string]map[string]interface{}{} if !strings.Contains(fileName, "1-X") { return jobList } for i, releaseBranch := range releaseBranches { releaseBranchBasedFileName := strings.ReplaceAll(fileName, "1-X", releaseBranch) otherReleaseBranches := append(append([]string{}, releaseBranches[:i]...), releaseBranches[i+1:]...) 
jobList[releaseBranchBasedFileName] = AppendMap(data, map[string]interface{}{ "releaseBranch": releaseBranch, "otherReleaseBranches": strings.Join(otherReleaseBranches, "|"), }) // If latest release branch, check if the release branch dir exists before executing cmd // This allows us to experiment with adding prow jobs for new branches without failing other runs if len(releaseBranches)-1 == i { jobList[releaseBranchBasedFileName]["latestReleaseBranch"] = true } } return jobList } func RunMappers(jobsToData map[string]map[string]interface{}, mappers []func(string, map[string]interface{}) map[string]map[string]interface{}) { if len(mappers) == 0 { return } for fileName, data := range jobsToData { newJobList := mappers[0](fileName, data) if len(newJobList) == 0 { continue } for k, v := range newJobList { jobsToData[k] = v if _, ok := data["templateFileName"]; !ok { jobsToData[k]["templateFileName"] = fileName } } delete(jobsToData, fileName) } RunMappers(jobsToData, mappers[1:]) } func UnmarshalJobs(jobDir string) (map[string]types.JobConfig, error) { files, err := ioutil.ReadDir(jobDir) if err != nil { return nil, fmt.Errorf("error reading job directory %s: %v", jobDir, err) } var mappers []func(string, map[string]interface{}) map[string]map[string]interface{} mappers = append(mappers, AddALVersion, AddGolangVersion, AddPythonVersion, AddReleaseBranch) jobsToData := map[string]map[string]interface{}{} for _, file := range files { jobsToData[file.Name()] = map[string]interface{}{} } RunMappers(jobsToData, mappers) finalJobList := map[string]types.JobConfig{} for fileName, data := range jobsToData { templateFileName := fileName if name, ok := data["templateFileName"]; ok { templateFileName = name.(string) } jobConfig, err := GenerateJobConfig(data, filepath.Join(jobDir, templateFileName)) if err != nil { return nil, fmt.Errorf("%v", err) } if latest, ok := data["latestReleaseBranch"]; ok && latest.(bool) { for j, command := range jobConfig.Commands { jobConfig.Commands[j] = "if make check-for-supported-release-branch -C $PROJECT_PATH; then " + command + "; fi" } } finalJobList[fileName] = jobConfig } return finalJobList, nil } func ExecuteTemplate(templateContent string, data interface{}) ([]byte, error) { temp := template.New("template") funcMap := map[string]interface{}{ "indent": func(spaces int, v string) string { pad := strings.Repeat(" ", spaces) return pad + strings.Replace(v, "\n", "\n"+pad, -1) }, "stringsJoin": strings.Join, "trim": strings.TrimSpace, } temp = temp.Funcs(funcMap) temp, err := temp.Parse(templateContent) if err != nil { return nil, fmt.Errorf("error parsing template: %v", err) } var buf bytes.Buffer err = temp.Execute(&buf, data) if err != nil { return nil, fmt.Errorf("error substituting values for template: %v", err) } return buf.Bytes(), nil } func GenerateJobConfig(data interface{}, filePath string) (types.JobConfig, error) { var jobConfig types.JobConfig contents, err := ioutil.ReadFile(filePath) if err != nil { return jobConfig, fmt.Errorf("error reading job YAML %s: %v", filePath, err) } var templatedContents []byte if data != nil { templatedContents, err = ExecuteTemplate(string(contents), data) if err != nil { return jobConfig, fmt.Errorf("error executing template: %v", err) } err = yaml.Unmarshal(templatedContents, &jobConfig) if err != nil { return jobConfig, fmt.Errorf("error unmarshaling contents of file %s: %v", filePath, err) } } else { err = yaml.Unmarshal(contents, &jobConfig) if err != nil { return jobConfig, fmt.Errorf("error unmarshaling contents of 
file %s: %v", filePath, err) } } return jobConfig, nil }
258
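For illustration, a minimal sketch of calling ExecuteTemplate with the indent and stringsJoin helpers it registers (the template snippet and command list are made up, not taken from the real prow job templates):

package main

import (
	"fmt"

	"github.com/aws/eks-distro-prow-jobs/templater/jobs/utils"
)

func main() {
	// "indent" and "stringsJoin" are helper functions registered by ExecuteTemplate.
	tmpl := `command:
{{ indent 2 (stringsJoin .commands "\n&&\n") }}
`
	data := map[string]interface{}{
		"commands": []string{"make build", "make test"},
	}

	out, err := utils.ExecuteTemplate(tmpl, data)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}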
etcdadm-bootstrap-provider
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "flag" "os" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log/zap" bootstrapv1alpha3 "github.com/aws/etcdadm-bootstrap-provider/api/v1alpha3" bootstrapv1beta1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/controllers" // +kubebuilder:scaffold:imports ) var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") watchNamespace string ) func init() { _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) _ = bootstrapv1alpha3.AddToScheme(scheme) _ = bootstrapv1beta1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } func main() { var metricsAddr string var enableLeaderElection bool flag.StringVar(&metricsAddr, "metrics-addr", "localhost:8080", "The address the metric endpoint binds to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") flag.StringVar(&watchNamespace, "namespace", "", "Namespace that the controller watches to reconcile etcdadmConfig objects. If unspecified, the controller watches forobjects across all namespaces.") flag.Parse() ctrl.SetLogger(zap.New(zap.UseDevMode(true))) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, Port: 9443, LeaderElection: enableLeaderElection, LeaderElectionID: "e949c6e2.cluster.x-k8s.io", Namespace: watchNamespace, }) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } // Setup the context that's going to be used in controllers and for the manager. ctx := ctrl.SetupSignalHandler() if err = (&controllers.EtcdadmConfigReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("EtcdadmConfig"), Scheme: mgr.GetScheme(), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "EtcdadmConfig") os.Exit(1) } if err = (&bootstrapv1beta1.EtcdadmConfig{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "EtcdadmConfig") os.Exit(1) } // +kubebuilder:scaffold:builder setupLog.Info("starting manager") if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } }
100
etcdadm-bootstrap-provider
aws
Go
package v1alpha3 import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" etcdv1beta1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" ) // ConvertTo converts this EtcdadmConfig to the Hub version (v1beta1). func (src *EtcdadmConfig) ConvertTo(dstRaw conversion.Hub) error { // nolint dst := dstRaw.(*etcdv1beta1.EtcdadmConfig) if err := Convert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(src, dst, nil); err != nil { return err } return nil } // ConvertFrom converts from the Hub version (v1beta1) to this EtcdadmConfig. func (dst *EtcdadmConfig) ConvertFrom(srcRaw conversion.Hub) error { // nolint src := srcRaw.(*etcdv1beta1.EtcdadmConfig) return Convert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(src, dst, nil) } // ConvertTo converts this EtcdadmConfigList to the Hub version (v1beta1). func (src *EtcdadmConfigList) ConvertTo(dstRaw conversion.Hub) error { // nolint dst := dstRaw.(*etcdv1beta1.EtcdadmConfigList) if err := Convert_v1alpha3_EtcdadmConfigList_To_v1beta1_EtcdadmConfigList(src, dst, nil); err != nil { return err } return nil } // ConvertFrom converts from the Hub version (v1beta1) to this EtcdadmConfigList. func (dst *EtcdadmConfigList) ConvertFrom(srcRaw conversion.Hub) error { // nolint src := srcRaw.(*etcdv1beta1.EtcdadmConfigList) return Convert_v1beta1_EtcdadmConfigList_To_v1alpha3_EtcdadmConfigList(src, dst, nil) } func Convert_v1beta1_BottlerocketConfig_To_v1alpha3_BottlerocketConfig(in *etcdv1beta1.BottlerocketConfig, out *BottlerocketConfig, s apiconversion.Scope) error { return autoConvert_v1beta1_BottlerocketConfig_To_v1alpha3_BottlerocketConfig(in, out, s) }
43
etcdadm-bootstrap-provider
aws
Go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package v1alpha3 contains API Schema definitions for the etcd bootstrap v1alpha3 API group // +kubebuilder:object:generate=true // +groupName=bootstrap.cluster.x-k8s.io // +k8s:conversion-gen=github.com/aws/etcdadm-bootstrap-provider/api/v1beta1 package v1alpha3
22
etcdadm-bootstrap-provider
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" capbk "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. const ( DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" // CloudConfig make the bootstrap data to be of cloud-config format. CloudConfig Format = "cloud-config" // Bottlerocket make the bootstrap data to be of bottlerocket format. Bottlerocket Format = "bottlerocket" ) // Format specifies the output format of the bootstrap data // +kubebuilder:validation:Enum=cloud-config;bottlerocket type Format string // EtcdadmConfigSpec defines the desired state of EtcdadmConfig type EtcdadmConfigSpec struct { // Users specifies extra users to add // +optional Users []capbk.User `json:"users,omitempty"` // +optional EtcdadmBuiltin bool `json:"etcdadmBuiltin,omitempty"` // +optional EtcdadmInstallCommands []string `json:"etcdadmInstallCommands,omitempty"` // PreEtcdadmCommands specifies extra commands to run before kubeadm runs // +optional PreEtcdadmCommands []string `json:"preEtcdadmCommands,omitempty"` // PostEtcdadmCommands specifies extra commands to run after kubeadm runs // +optional PostEtcdadmCommands []string `json:"postEtcdadmCommands,omitempty"` // Format specifies the output format of the bootstrap data // +optional Format Format `json:"format,omitempty"` // BottlerocketConfig specifies the configuration for the bottlerocket bootstrap data // +optional BottlerocketConfig *BottlerocketConfig `json:"bottlerocketConfig,omitempty"` // CloudInitConfig specifies the configuration for the cloud-init bootstrap data // +optional CloudInitConfig *CloudInitConfig `json:"cloudInitConfig,omitempty"` // Files specifies extra files to be passed to user_data upon creation. // +optional Files []capbk.File `json:"files,omitempty"` // Proxy holds the https and no proxy information // This is only used for bottlerocket // +optional Proxy *ProxyConfiguration `json:"proxy,omitempty"` // RegistryMirror holds the image registry mirror information // This is only used for bottlerocket // +optional RegistryMirror *RegistryMirrorConfiguration `json:"registryMirror,omitempty"` // CipherSuites is a list of comma-delimited supported TLS cipher suites, mapping to the --cipher-suites flag. // Default is empty, which means that they will be auto-populated by Go. // +optional CipherSuites string `json:"cipherSuites,omitempty"` // NTP specifies NTP configuration // +optional NTP *capbk.NTP `json:"ntp,omitempty"` // Certbundle holds additional cert bundles. 
// +optional CertBundles []capbk.CertBundle `json:"certBundles,omitempty"` } type BottlerocketConfig struct { // EtcdImage specifies the etcd image to use by etcdadm EtcdImage string `json:"etcdImage,omitempty"` // BootstrapImage specifies the container image to use for bottlerocket's bootstrapping BootstrapImage string `json:"bootstrapImage"` // AdminImage specifies the admin container image to use for bottlerocket. // +optional AdminImage string `json:"adminImage,omitempty"` // ControlImage specifies the control container image to use for bottlerocket. // +optional ControlImage string `json:"controlImage,omitempty"` // PauseImage specifies the image to use for the pause container PauseImage string `json:"pauseImage"` // CustomHostContainers adds additional host containers for bottlerocket. // +optional CustomHostContainers []BottlerocketHostContainer `json:"customHostContainers,omitempty"` // CustomBootstrapContainers adds additional bootstrap containers for bottlerocket. // +optional CustomBootstrapContainers []BottlerocketBootstrapContainer `json:"customBootstrapContainers,omitempty"` // Kernel specifies additional kernel settings for bottlerocket Kernel *capbk.BottlerocketKernelSettings `json:"kernel,omitempty"` // Boot specifies boot settings for bottlerocket Boot *capbk.BottlerocketBootSettings `json:"boot,omitempty"` } // BottlerocketHostContainer holds the host container setting for bottlerocket. type BottlerocketHostContainer struct { // Name is the host container name that will be given to the container in BR's `apiserver` // +kubebuilder:validation:Required Name string `json:"name"` // Superpowered indicates if the container will be superpowered // +kubebuilder:validation:Required Superpowered bool `json:"superpowered"` // Image is the actual location of the host container image. Image string `json:"image"` // UserData is the userdata that will be attached to the image. // +optional UserData string `json:"userData,omitempty"` } // BottlerocketBootstrapContainer holds the bootstrap container setting for bottlerocket. type BottlerocketBootstrapContainer struct { // Name is the bootstrap container name that will be given to the container in BR's `apiserver`. // +kubebuilder:validation:Required Name string `json:"name"` // Image is the actual image used for Bottlerocket bootstrap. Image string `json:"image"` // Essential decides whether or not the container should fail the boot process. // Bootstrap containers configured with essential = true will stop the boot process if they exit code is a non-zero value. // Default is false. // +optional Essential bool `json:"essential"` // Mode represents the bootstrap container mode. // +kubebuilder:validation:Enum=always;off;once Mode string `json:"mode"` // UserData is the base64-encoded userdata. 
// +optional UserData string `json:"userData,omitempty"` } type CloudInitConfig struct { // +optional Version string `json:"version,omitempty"` // EtcdReleaseURL is an optional field to specify where etcdadm can download etcd from // +optional EtcdReleaseURL string `json:"etcdReleaseURL,omitempty"` // InstallDir is an optional field to specify where etcdadm will extract etcd binaries to // +optional InstallDir string `json:"installDir,omitempty"` } // ProxyConfiguration holds the settings for proxying bottlerocket services type ProxyConfiguration struct { // HTTP Proxy HTTPProxy string `json:"httpProxy,omitempty"` // HTTPS proxy HTTPSProxy string `json:"httpsProxy,omitempty"` // No proxy, list of ips that should not use proxy NoProxy []string `json:"noProxy,omitempty"` } // RegistryMirrorConfiguration holds the settings for image registry mirror type RegistryMirrorConfiguration struct { // Endpoint defines the registry mirror endpoint to use for pulling images Endpoint string `json:"endpoint,omitempty"` // CACert defines the CA cert for the registry mirror CACert string `json:"caCert,omitempty"` } // EtcdadmConfigStatus defines the observed state of EtcdadmConfig type EtcdadmConfigStatus struct { // Conditions defines current service state of the KubeadmConfig. // +optional Conditions clusterv1.Conditions `json:"conditions,omitempty"` DataSecretName *string `json:"dataSecretName,omitempty"` Ready bool `json:"ready,omitempty"` } func (c *EtcdadmConfig) GetConditions() clusterv1.Conditions { return c.Status.Conditions } func (c *EtcdadmConfig) SetConditions(conditions clusterv1.Conditions) { c.Status.Conditions = conditions } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // EtcdadmConfig is the Schema for the etcdadmconfigs API type EtcdadmConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec EtcdadmConfigSpec `json:"spec,omitempty"` Status EtcdadmConfigStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true // EtcdadmConfigList contains a list of EtcdadmConfig type EtcdadmConfigList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []EtcdadmConfig `json:"items"` } func init() { SchemeBuilder.Register(&EtcdadmConfig{}, &EtcdadmConfigList{}) }
249
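To make the shape of these types concrete, a sketch that builds an EtcdadmConfig in Go using the cloud-init format (the object name, etcd version, and install directory values are made up):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	bootstrapv1alpha3 "github.com/aws/etcdadm-bootstrap-provider/api/v1alpha3"
)

func main() {
	// Hypothetical object using the cloud-config format defined above.
	cfg := bootstrapv1alpha3.EtcdadmConfig{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-etcdadm-config",
			Namespace: "default",
		},
		Spec: bootstrapv1alpha3.EtcdadmConfigSpec{
			Format: bootstrapv1alpha3.CloudConfig,
			CloudInitConfig: &bootstrapv1alpha3.CloudInitConfig{
				Version:    "v3.5.9",
				InstallDir: "/usr/bin",
			},
			PreEtcdadmCommands: []string{"swapoff -a"},
		},
	}
	fmt.Printf("%s uses format %q\n", cfg.Name, cfg.Spec.Format)
}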
etcdadm-bootstrap-provider
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package v1alpha3 contains API Schema definitions for the bootstrap v1alpha3 API group // +kubebuilder:object:generate=true // +groupName=bootstrap.cluster.x-k8s.io package v1alpha3 import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1alpha3"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme // localSchemeBuilder is used for type conversions. localSchemeBuilder = SchemeBuilder.SchemeBuilder )
40
etcdadm-bootstrap-provider
aws
Go
//go:build !ignore_autogenerated_etcd_bootstrap // +build !ignore_autogenerated_etcd_bootstrap /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by conversion-gen. DO NOT EDIT. package v1alpha3 import ( unsafe "unsafe" v1beta1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" apiv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) func init() { localSchemeBuilder.Register(RegisterConversions) } // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { if err := s.AddGeneratedConversionFunc((*BottlerocketBootstrapContainer)(nil), (*v1beta1.BottlerocketBootstrapContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_BottlerocketBootstrapContainer_To_v1beta1_BottlerocketBootstrapContainer(a.(*BottlerocketBootstrapContainer), b.(*v1beta1.BottlerocketBootstrapContainer), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.BottlerocketBootstrapContainer)(nil), (*BottlerocketBootstrapContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_BottlerocketBootstrapContainer_To_v1alpha3_BottlerocketBootstrapContainer(a.(*v1beta1.BottlerocketBootstrapContainer), b.(*BottlerocketBootstrapContainer), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*BottlerocketConfig)(nil), (*v1beta1.BottlerocketConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_BottlerocketConfig_To_v1beta1_BottlerocketConfig(a.(*BottlerocketConfig), b.(*v1beta1.BottlerocketConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*BottlerocketHostContainer)(nil), (*v1beta1.BottlerocketHostContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_BottlerocketHostContainer_To_v1beta1_BottlerocketHostContainer(a.(*BottlerocketHostContainer), b.(*v1beta1.BottlerocketHostContainer), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.BottlerocketHostContainer)(nil), (*BottlerocketHostContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_BottlerocketHostContainer_To_v1alpha3_BottlerocketHostContainer(a.(*v1beta1.BottlerocketHostContainer), b.(*BottlerocketHostContainer), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*CloudInitConfig)(nil), (*v1beta1.CloudInitConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_CloudInitConfig_To_v1beta1_CloudInitConfig(a.(*CloudInitConfig), b.(*v1beta1.CloudInitConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.CloudInitConfig)(nil), (*CloudInitConfig)(nil), func(a, b 
interface{}, scope conversion.Scope) error { return Convert_v1beta1_CloudInitConfig_To_v1alpha3_CloudInitConfig(a.(*v1beta1.CloudInitConfig), b.(*CloudInitConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmConfig)(nil), (*v1beta1.EtcdadmConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(a.(*EtcdadmConfig), b.(*v1beta1.EtcdadmConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmConfig)(nil), (*EtcdadmConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(a.(*v1beta1.EtcdadmConfig), b.(*EtcdadmConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmConfigList)(nil), (*v1beta1.EtcdadmConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmConfigList_To_v1beta1_EtcdadmConfigList(a.(*EtcdadmConfigList), b.(*v1beta1.EtcdadmConfigList), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmConfigList)(nil), (*EtcdadmConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmConfigList_To_v1alpha3_EtcdadmConfigList(a.(*v1beta1.EtcdadmConfigList), b.(*EtcdadmConfigList), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmConfigSpec)(nil), (*v1beta1.EtcdadmConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec(a.(*EtcdadmConfigSpec), b.(*v1beta1.EtcdadmConfigSpec), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmConfigSpec)(nil), (*EtcdadmConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec(a.(*v1beta1.EtcdadmConfigSpec), b.(*EtcdadmConfigSpec), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmConfigStatus)(nil), (*v1beta1.EtcdadmConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmConfigStatus_To_v1beta1_EtcdadmConfigStatus(a.(*EtcdadmConfigStatus), b.(*v1beta1.EtcdadmConfigStatus), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmConfigStatus)(nil), (*EtcdadmConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmConfigStatus_To_v1alpha3_EtcdadmConfigStatus(a.(*v1beta1.EtcdadmConfigStatus), b.(*EtcdadmConfigStatus), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ProxyConfiguration)(nil), (*v1beta1.ProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_ProxyConfiguration_To_v1beta1_ProxyConfiguration(a.(*ProxyConfiguration), b.(*v1beta1.ProxyConfiguration), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.ProxyConfiguration)(nil), (*ProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ProxyConfiguration_To_v1alpha3_ProxyConfiguration(a.(*v1beta1.ProxyConfiguration), b.(*ProxyConfiguration), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*RegistryMirrorConfiguration)(nil), (*v1beta1.RegistryMirrorConfiguration)(nil), func(a, b interface{}, scope 
conversion.Scope) error { return Convert_v1alpha3_RegistryMirrorConfiguration_To_v1beta1_RegistryMirrorConfiguration(a.(*RegistryMirrorConfiguration), b.(*v1beta1.RegistryMirrorConfiguration), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.RegistryMirrorConfiguration)(nil), (*RegistryMirrorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_RegistryMirrorConfiguration_To_v1alpha3_RegistryMirrorConfiguration(a.(*v1beta1.RegistryMirrorConfiguration), b.(*RegistryMirrorConfiguration), scope) }); err != nil { return err } if err := s.AddConversionFunc((*v1beta1.BottlerocketConfig)(nil), (*BottlerocketConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_BottlerocketConfig_To_v1alpha3_BottlerocketConfig(a.(*v1beta1.BottlerocketConfig), b.(*BottlerocketConfig), scope) }); err != nil { return err } return nil } func autoConvert_v1alpha3_BottlerocketBootstrapContainer_To_v1beta1_BottlerocketBootstrapContainer(in *BottlerocketBootstrapContainer, out *v1beta1.BottlerocketBootstrapContainer, s conversion.Scope) error { out.Name = in.Name out.Image = in.Image out.Essential = in.Essential out.Mode = in.Mode out.UserData = in.UserData return nil } // Convert_v1alpha3_BottlerocketBootstrapContainer_To_v1beta1_BottlerocketBootstrapContainer is an autogenerated conversion function. func Convert_v1alpha3_BottlerocketBootstrapContainer_To_v1beta1_BottlerocketBootstrapContainer(in *BottlerocketBootstrapContainer, out *v1beta1.BottlerocketBootstrapContainer, s conversion.Scope) error { return autoConvert_v1alpha3_BottlerocketBootstrapContainer_To_v1beta1_BottlerocketBootstrapContainer(in, out, s) } func autoConvert_v1beta1_BottlerocketBootstrapContainer_To_v1alpha3_BottlerocketBootstrapContainer(in *v1beta1.BottlerocketBootstrapContainer, out *BottlerocketBootstrapContainer, s conversion.Scope) error { out.Name = in.Name out.Image = in.Image out.Essential = in.Essential out.Mode = in.Mode out.UserData = in.UserData return nil } // Convert_v1beta1_BottlerocketBootstrapContainer_To_v1alpha3_BottlerocketBootstrapContainer is an autogenerated conversion function. func Convert_v1beta1_BottlerocketBootstrapContainer_To_v1alpha3_BottlerocketBootstrapContainer(in *v1beta1.BottlerocketBootstrapContainer, out *BottlerocketBootstrapContainer, s conversion.Scope) error { return autoConvert_v1beta1_BottlerocketBootstrapContainer_To_v1alpha3_BottlerocketBootstrapContainer(in, out, s) } func autoConvert_v1alpha3_BottlerocketConfig_To_v1beta1_BottlerocketConfig(in *BottlerocketConfig, out *v1beta1.BottlerocketConfig, s conversion.Scope) error { out.EtcdImage = in.EtcdImage out.BootstrapImage = in.BootstrapImage out.AdminImage = in.AdminImage out.ControlImage = in.ControlImage out.PauseImage = in.PauseImage out.CustomHostContainers = *(*[]v1beta1.BottlerocketHostContainer)(unsafe.Pointer(&in.CustomHostContainers)) out.CustomBootstrapContainers = *(*[]v1beta1.BottlerocketBootstrapContainer)(unsafe.Pointer(&in.CustomBootstrapContainers)) out.Kernel = (*apiv1beta1.BottlerocketKernelSettings)(unsafe.Pointer(in.Kernel)) out.Boot = (*apiv1beta1.BottlerocketBootSettings)(unsafe.Pointer(in.Boot)) return nil } // Convert_v1alpha3_BottlerocketConfig_To_v1beta1_BottlerocketConfig is an autogenerated conversion function. 
func Convert_v1alpha3_BottlerocketConfig_To_v1beta1_BottlerocketConfig(in *BottlerocketConfig, out *v1beta1.BottlerocketConfig, s conversion.Scope) error { return autoConvert_v1alpha3_BottlerocketConfig_To_v1beta1_BottlerocketConfig(in, out, s) } func autoConvert_v1beta1_BottlerocketConfig_To_v1alpha3_BottlerocketConfig(in *v1beta1.BottlerocketConfig, out *BottlerocketConfig, s conversion.Scope) error { out.EtcdImage = in.EtcdImage out.BootstrapImage = in.BootstrapImage out.AdminImage = in.AdminImage out.ControlImage = in.ControlImage out.PauseImage = in.PauseImage out.CustomHostContainers = *(*[]BottlerocketHostContainer)(unsafe.Pointer(&in.CustomHostContainers)) out.CustomBootstrapContainers = *(*[]BottlerocketBootstrapContainer)(unsafe.Pointer(&in.CustomBootstrapContainers)) out.Kernel = (*apiv1beta1.BottlerocketKernelSettings)(unsafe.Pointer(in.Kernel)) out.Boot = (*apiv1beta1.BottlerocketBootSettings)(unsafe.Pointer(in.Boot)) return nil } func autoConvert_v1alpha3_BottlerocketHostContainer_To_v1beta1_BottlerocketHostContainer(in *BottlerocketHostContainer, out *v1beta1.BottlerocketHostContainer, s conversion.Scope) error { out.Name = in.Name out.Superpowered = in.Superpowered out.Image = in.Image out.UserData = in.UserData return nil } // Convert_v1alpha3_BottlerocketHostContainer_To_v1beta1_BottlerocketHostContainer is an autogenerated conversion function. func Convert_v1alpha3_BottlerocketHostContainer_To_v1beta1_BottlerocketHostContainer(in *BottlerocketHostContainer, out *v1beta1.BottlerocketHostContainer, s conversion.Scope) error { return autoConvert_v1alpha3_BottlerocketHostContainer_To_v1beta1_BottlerocketHostContainer(in, out, s) } func autoConvert_v1beta1_BottlerocketHostContainer_To_v1alpha3_BottlerocketHostContainer(in *v1beta1.BottlerocketHostContainer, out *BottlerocketHostContainer, s conversion.Scope) error { out.Name = in.Name out.Superpowered = in.Superpowered out.Image = in.Image out.UserData = in.UserData return nil } // Convert_v1beta1_BottlerocketHostContainer_To_v1alpha3_BottlerocketHostContainer is an autogenerated conversion function. func Convert_v1beta1_BottlerocketHostContainer_To_v1alpha3_BottlerocketHostContainer(in *v1beta1.BottlerocketHostContainer, out *BottlerocketHostContainer, s conversion.Scope) error { return autoConvert_v1beta1_BottlerocketHostContainer_To_v1alpha3_BottlerocketHostContainer(in, out, s) } func autoConvert_v1alpha3_CloudInitConfig_To_v1beta1_CloudInitConfig(in *CloudInitConfig, out *v1beta1.CloudInitConfig, s conversion.Scope) error { out.Version = in.Version out.EtcdReleaseURL = in.EtcdReleaseURL out.InstallDir = in.InstallDir return nil } // Convert_v1alpha3_CloudInitConfig_To_v1beta1_CloudInitConfig is an autogenerated conversion function. func Convert_v1alpha3_CloudInitConfig_To_v1beta1_CloudInitConfig(in *CloudInitConfig, out *v1beta1.CloudInitConfig, s conversion.Scope) error { return autoConvert_v1alpha3_CloudInitConfig_To_v1beta1_CloudInitConfig(in, out, s) } func autoConvert_v1beta1_CloudInitConfig_To_v1alpha3_CloudInitConfig(in *v1beta1.CloudInitConfig, out *CloudInitConfig, s conversion.Scope) error { out.Version = in.Version out.EtcdReleaseURL = in.EtcdReleaseURL out.InstallDir = in.InstallDir return nil } // Convert_v1beta1_CloudInitConfig_To_v1alpha3_CloudInitConfig is an autogenerated conversion function. 
func Convert_v1beta1_CloudInitConfig_To_v1alpha3_CloudInitConfig(in *v1beta1.CloudInitConfig, out *CloudInitConfig, s conversion.Scope) error { return autoConvert_v1beta1_CloudInitConfig_To_v1alpha3_CloudInitConfig(in, out, s) } func autoConvert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(in *EtcdadmConfig, out *v1beta1.EtcdadmConfig, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1alpha3_EtcdadmConfigStatus_To_v1beta1_EtcdadmConfigStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(in *EtcdadmConfig, out *v1beta1.EtcdadmConfig, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(in, out, s) } func autoConvert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(in *v1beta1.EtcdadmConfig, out *EtcdadmConfig, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1beta1_EtcdadmConfigStatus_To_v1alpha3_EtcdadmConfigStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig is an autogenerated conversion function. func Convert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(in *v1beta1.EtcdadmConfig, out *EtcdadmConfig, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(in, out, s) } func autoConvert_v1alpha3_EtcdadmConfigList_To_v1beta1_EtcdadmConfigList(in *EtcdadmConfigList, out *v1beta1.EtcdadmConfigList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.EtcdadmConfig, len(*in)) for i := range *in { if err := Convert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // Convert_v1alpha3_EtcdadmConfigList_To_v1beta1_EtcdadmConfigList is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmConfigList_To_v1beta1_EtcdadmConfigList(in *EtcdadmConfigList, out *v1beta1.EtcdadmConfigList, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmConfigList_To_v1beta1_EtcdadmConfigList(in, out, s) } func autoConvert_v1beta1_EtcdadmConfigList_To_v1alpha3_EtcdadmConfigList(in *v1beta1.EtcdadmConfigList, out *EtcdadmConfigList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EtcdadmConfig, len(*in)) for i := range *in { if err := Convert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // Convert_v1beta1_EtcdadmConfigList_To_v1alpha3_EtcdadmConfigList is an autogenerated conversion function. 
func Convert_v1beta1_EtcdadmConfigList_To_v1alpha3_EtcdadmConfigList(in *v1beta1.EtcdadmConfigList, out *EtcdadmConfigList, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmConfigList_To_v1alpha3_EtcdadmConfigList(in, out, s) } func autoConvert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec(in *EtcdadmConfigSpec, out *v1beta1.EtcdadmConfigSpec, s conversion.Scope) error { out.Users = *(*[]apiv1beta1.User)(unsafe.Pointer(&in.Users)) out.EtcdadmBuiltin = in.EtcdadmBuiltin out.EtcdadmInstallCommands = *(*[]string)(unsafe.Pointer(&in.EtcdadmInstallCommands)) out.PreEtcdadmCommands = *(*[]string)(unsafe.Pointer(&in.PreEtcdadmCommands)) out.PostEtcdadmCommands = *(*[]string)(unsafe.Pointer(&in.PostEtcdadmCommands)) out.Format = v1beta1.Format(in.Format) if in.BottlerocketConfig != nil { in, out := &in.BottlerocketConfig, &out.BottlerocketConfig *out = new(v1beta1.BottlerocketConfig) if err := Convert_v1alpha3_BottlerocketConfig_To_v1beta1_BottlerocketConfig(*in, *out, s); err != nil { return err } } else { out.BottlerocketConfig = nil } out.CloudInitConfig = (*v1beta1.CloudInitConfig)(unsafe.Pointer(in.CloudInitConfig)) out.Files = *(*[]apiv1beta1.File)(unsafe.Pointer(&in.Files)) out.Proxy = (*v1beta1.ProxyConfiguration)(unsafe.Pointer(in.Proxy)) out.RegistryMirror = (*v1beta1.RegistryMirrorConfiguration)(unsafe.Pointer(in.RegistryMirror)) out.CipherSuites = in.CipherSuites out.NTP = (*apiv1beta1.NTP)(unsafe.Pointer(in.NTP)) out.CertBundles = *(*[]apiv1beta1.CertBundle)(unsafe.Pointer(&in.CertBundles)) return nil } // Convert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec(in *EtcdadmConfigSpec, out *v1beta1.EtcdadmConfigSpec, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec(in, out, s) } func autoConvert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec(in *v1beta1.EtcdadmConfigSpec, out *EtcdadmConfigSpec, s conversion.Scope) error { out.Users = *(*[]apiv1beta1.User)(unsafe.Pointer(&in.Users)) out.EtcdadmBuiltin = in.EtcdadmBuiltin out.EtcdadmInstallCommands = *(*[]string)(unsafe.Pointer(&in.EtcdadmInstallCommands)) out.PreEtcdadmCommands = *(*[]string)(unsafe.Pointer(&in.PreEtcdadmCommands)) out.PostEtcdadmCommands = *(*[]string)(unsafe.Pointer(&in.PostEtcdadmCommands)) out.Format = Format(in.Format) if in.BottlerocketConfig != nil { in, out := &in.BottlerocketConfig, &out.BottlerocketConfig *out = new(BottlerocketConfig) if err := Convert_v1beta1_BottlerocketConfig_To_v1alpha3_BottlerocketConfig(*in, *out, s); err != nil { return err } } else { out.BottlerocketConfig = nil } out.CloudInitConfig = (*CloudInitConfig)(unsafe.Pointer(in.CloudInitConfig)) out.Files = *(*[]apiv1beta1.File)(unsafe.Pointer(&in.Files)) out.Proxy = (*ProxyConfiguration)(unsafe.Pointer(in.Proxy)) out.RegistryMirror = (*RegistryMirrorConfiguration)(unsafe.Pointer(in.RegistryMirror)) out.CipherSuites = in.CipherSuites out.NTP = (*apiv1beta1.NTP)(unsafe.Pointer(in.NTP)) out.CertBundles = *(*[]apiv1beta1.CertBundle)(unsafe.Pointer(&in.CertBundles)) return nil } // Convert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec is an autogenerated conversion function. 
func Convert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec(in *v1beta1.EtcdadmConfigSpec, out *EtcdadmConfigSpec, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec(in, out, s) } func autoConvert_v1alpha3_EtcdadmConfigStatus_To_v1beta1_EtcdadmConfigStatus(in *EtcdadmConfigStatus, out *v1beta1.EtcdadmConfigStatus, s conversion.Scope) error { out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) out.Ready = in.Ready return nil } // Convert_v1alpha3_EtcdadmConfigStatus_To_v1beta1_EtcdadmConfigStatus is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmConfigStatus_To_v1beta1_EtcdadmConfigStatus(in *EtcdadmConfigStatus, out *v1beta1.EtcdadmConfigStatus, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmConfigStatus_To_v1beta1_EtcdadmConfigStatus(in, out, s) } func autoConvert_v1beta1_EtcdadmConfigStatus_To_v1alpha3_EtcdadmConfigStatus(in *v1beta1.EtcdadmConfigStatus, out *EtcdadmConfigStatus, s conversion.Scope) error { out.Conditions = *(*apiv1alpha3.Conditions)(unsafe.Pointer(&in.Conditions)) out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) out.Ready = in.Ready return nil } // Convert_v1beta1_EtcdadmConfigStatus_To_v1alpha3_EtcdadmConfigStatus is an autogenerated conversion function. func Convert_v1beta1_EtcdadmConfigStatus_To_v1alpha3_EtcdadmConfigStatus(in *v1beta1.EtcdadmConfigStatus, out *EtcdadmConfigStatus, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmConfigStatus_To_v1alpha3_EtcdadmConfigStatus(in, out, s) } func autoConvert_v1alpha3_ProxyConfiguration_To_v1beta1_ProxyConfiguration(in *ProxyConfiguration, out *v1beta1.ProxyConfiguration, s conversion.Scope) error { out.HTTPProxy = in.HTTPProxy out.HTTPSProxy = in.HTTPSProxy out.NoProxy = *(*[]string)(unsafe.Pointer(&in.NoProxy)) return nil } // Convert_v1alpha3_ProxyConfiguration_To_v1beta1_ProxyConfiguration is an autogenerated conversion function. func Convert_v1alpha3_ProxyConfiguration_To_v1beta1_ProxyConfiguration(in *ProxyConfiguration, out *v1beta1.ProxyConfiguration, s conversion.Scope) error { return autoConvert_v1alpha3_ProxyConfiguration_To_v1beta1_ProxyConfiguration(in, out, s) } func autoConvert_v1beta1_ProxyConfiguration_To_v1alpha3_ProxyConfiguration(in *v1beta1.ProxyConfiguration, out *ProxyConfiguration, s conversion.Scope) error { out.HTTPProxy = in.HTTPProxy out.HTTPSProxy = in.HTTPSProxy out.NoProxy = *(*[]string)(unsafe.Pointer(&in.NoProxy)) return nil } // Convert_v1beta1_ProxyConfiguration_To_v1alpha3_ProxyConfiguration is an autogenerated conversion function. func Convert_v1beta1_ProxyConfiguration_To_v1alpha3_ProxyConfiguration(in *v1beta1.ProxyConfiguration, out *ProxyConfiguration, s conversion.Scope) error { return autoConvert_v1beta1_ProxyConfiguration_To_v1alpha3_ProxyConfiguration(in, out, s) } func autoConvert_v1alpha3_RegistryMirrorConfiguration_To_v1beta1_RegistryMirrorConfiguration(in *RegistryMirrorConfiguration, out *v1beta1.RegistryMirrorConfiguration, s conversion.Scope) error { out.Endpoint = in.Endpoint out.CACert = in.CACert return nil } // Convert_v1alpha3_RegistryMirrorConfiguration_To_v1beta1_RegistryMirrorConfiguration is an autogenerated conversion function. 
func Convert_v1alpha3_RegistryMirrorConfiguration_To_v1beta1_RegistryMirrorConfiguration(in *RegistryMirrorConfiguration, out *v1beta1.RegistryMirrorConfiguration, s conversion.Scope) error { return autoConvert_v1alpha3_RegistryMirrorConfiguration_To_v1beta1_RegistryMirrorConfiguration(in, out, s) } func autoConvert_v1beta1_RegistryMirrorConfiguration_To_v1alpha3_RegistryMirrorConfiguration(in *v1beta1.RegistryMirrorConfiguration, out *RegistryMirrorConfiguration, s conversion.Scope) error { out.Endpoint = in.Endpoint out.CACert = in.CACert return nil } // Convert_v1beta1_RegistryMirrorConfiguration_To_v1alpha3_RegistryMirrorConfiguration is an autogenerated conversion function. func Convert_v1beta1_RegistryMirrorConfiguration_To_v1alpha3_RegistryMirrorConfiguration(in *v1beta1.RegistryMirrorConfiguration, out *RegistryMirrorConfiguration, s conversion.Scope) error { return autoConvert_v1beta1_RegistryMirrorConfiguration_To_v1alpha3_RegistryMirrorConfiguration(in, out, s) }
458
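The generated Convert_* helpers above are normally invoked from hand-written ConvertTo/ConvertFrom methods on the v1alpha3 spoke types. The sketch below is illustrative only (it is not a file from this record) and assumes the standard controller-runtime conversion interfaces; passing a nil conversion.Scope mirrors how these autogenerated helpers are commonly called.

package v1alpha3

import (
    "sigs.k8s.io/controller-runtime/pkg/conversion"

    "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
)

// ConvertTo converts this v1alpha3 EtcdadmConfig to the v1beta1 hub version.
// Illustrative sketch: it simply delegates to the generated conversion helper.
func (src *EtcdadmConfig) ConvertTo(dstRaw conversion.Hub) error {
    dst := dstRaw.(*v1beta1.EtcdadmConfig)
    return Convert_v1alpha3_EtcdadmConfig_To_v1beta1_EtcdadmConfig(src, dst, nil)
}

// ConvertFrom converts from the v1beta1 hub version back to this v1alpha3 EtcdadmConfig.
func (dst *EtcdadmConfig) ConvertFrom(srcRaw conversion.Hub) error {
    src := srcRaw.(*v1beta1.EtcdadmConfig)
    return Convert_v1beta1_EtcdadmConfig_To_v1alpha3_EtcdadmConfig(src, dst, nil)
}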
etcdadm-bootstrap-provider
aws
Go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1alpha3 import ( "k8s.io/apimachinery/pkg/runtime" apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BottlerocketBootstrapContainer) DeepCopyInto(out *BottlerocketBootstrapContainer) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketBootstrapContainer. func (in *BottlerocketBootstrapContainer) DeepCopy() *BottlerocketBootstrapContainer { if in == nil { return nil } out := new(BottlerocketBootstrapContainer) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BottlerocketConfig) DeepCopyInto(out *BottlerocketConfig) { *out = *in if in.CustomHostContainers != nil { in, out := &in.CustomHostContainers, &out.CustomHostContainers *out = make([]BottlerocketHostContainer, len(*in)) copy(*out, *in) } if in.CustomBootstrapContainers != nil { in, out := &in.CustomBootstrapContainers, &out.CustomBootstrapContainers *out = make([]BottlerocketBootstrapContainer, len(*in)) copy(*out, *in) } if in.Kernel != nil { in, out := &in.Kernel, &out.Kernel *out = new(v1beta1.BottlerocketKernelSettings) (*in).DeepCopyInto(*out) } if in.Boot != nil { in, out := &in.Boot, &out.Boot *out = new(v1beta1.BottlerocketBootSettings) (*in).DeepCopyInto(*out) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketConfig. func (in *BottlerocketConfig) DeepCopy() *BottlerocketConfig { if in == nil { return nil } out := new(BottlerocketConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BottlerocketHostContainer) DeepCopyInto(out *BottlerocketHostContainer) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketHostContainer. func (in *BottlerocketHostContainer) DeepCopy() *BottlerocketHostContainer { if in == nil { return nil } out := new(BottlerocketHostContainer) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloudInitConfig) DeepCopyInto(out *CloudInitConfig) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitConfig. func (in *CloudInitConfig) DeepCopy() *CloudInitConfig { if in == nil { return nil } out := new(CloudInitConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EtcdadmConfig) DeepCopyInto(out *EtcdadmConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfig. func (in *EtcdadmConfig) DeepCopy() *EtcdadmConfig { if in == nil { return nil } out := new(EtcdadmConfig) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmConfigList) DeepCopyInto(out *EtcdadmConfigList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EtcdadmConfig, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfigList. func (in *EtcdadmConfigList) DeepCopy() *EtcdadmConfigList { if in == nil { return nil } out := new(EtcdadmConfigList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmConfigList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmConfigSpec) DeepCopyInto(out *EtcdadmConfigSpec) { *out = *in if in.Users != nil { in, out := &in.Users, &out.Users *out = make([]v1beta1.User, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.EtcdadmInstallCommands != nil { in, out := &in.EtcdadmInstallCommands, &out.EtcdadmInstallCommands *out = make([]string, len(*in)) copy(*out, *in) } if in.PreEtcdadmCommands != nil { in, out := &in.PreEtcdadmCommands, &out.PreEtcdadmCommands *out = make([]string, len(*in)) copy(*out, *in) } if in.PostEtcdadmCommands != nil { in, out := &in.PostEtcdadmCommands, &out.PostEtcdadmCommands *out = make([]string, len(*in)) copy(*out, *in) } if in.BottlerocketConfig != nil { in, out := &in.BottlerocketConfig, &out.BottlerocketConfig *out = new(BottlerocketConfig) (*in).DeepCopyInto(*out) } if in.CloudInitConfig != nil { in, out := &in.CloudInitConfig, &out.CloudInitConfig *out = new(CloudInitConfig) **out = **in } if in.Files != nil { in, out := &in.Files, &out.Files *out = make([]v1beta1.File, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Proxy != nil { in, out := &in.Proxy, &out.Proxy *out = new(ProxyConfiguration) (*in).DeepCopyInto(*out) } if in.RegistryMirror != nil { in, out := &in.RegistryMirror, &out.RegistryMirror *out = new(RegistryMirrorConfiguration) **out = **in } if in.NTP != nil { in, out := &in.NTP, &out.NTP *out = new(v1beta1.NTP) (*in).DeepCopyInto(*out) } if in.CertBundles != nil { in, out := &in.CertBundles, &out.CertBundles *out = make([]v1beta1.CertBundle, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfigSpec. 
func (in *EtcdadmConfigSpec) DeepCopy() *EtcdadmConfigSpec { if in == nil { return nil } out := new(EtcdadmConfigSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmConfigStatus) DeepCopyInto(out *EtcdadmConfigStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make(apiv1alpha3.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.DataSecretName != nil { in, out := &in.DataSecretName, &out.DataSecretName *out = new(string) **out = **in } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfigStatus. func (in *EtcdadmConfigStatus) DeepCopy() *EtcdadmConfigStatus { if in == nil { return nil } out := new(EtcdadmConfigStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyConfiguration) DeepCopyInto(out *ProxyConfiguration) { *out = *in if in.NoProxy != nil { in, out := &in.NoProxy, &out.NoProxy *out = make([]string, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfiguration. func (in *ProxyConfiguration) DeepCopy() *ProxyConfiguration { if in == nil { return nil } out := new(ProxyConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistryMirrorConfiguration) DeepCopyInto(out *RegistryMirrorConfiguration) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryMirrorConfiguration. func (in *RegistryMirrorConfiguration) DeepCopy() *RegistryMirrorConfiguration { if in == nil { return nil } out := new(RegistryMirrorConfiguration) in.DeepCopyInto(out) return out }
304
etcdadm-bootstrap-provider
aws
Go
package v1beta1

// Hub marks EtcdadmConfig as a conversion hub.
func (*EtcdadmConfig) Hub() {}

// Hub marks EtcdadmConfigList as a conversion hub.
func (*EtcdadmConfigList) Hub() {}
8
etcdadm-bootstrap-provider
aws
Go
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1beta1 contains API Schema definitions for the etcd bootstrap v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=bootstrap.cluster.x-k8s.io
package v1beta1
21
etcdadm-bootstrap-provider
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    capbk "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)

// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

const (
    DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable"

    // CloudConfig produces bootstrap data in cloud-config format.
    CloudConfig Format = "cloud-config"

    // Bottlerocket produces bootstrap data in bottlerocket format.
    Bottlerocket Format = "bottlerocket"
)

// Format specifies the output format of the bootstrap data
// +kubebuilder:validation:Enum=cloud-config;bottlerocket
type Format string

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// EtcdadmConfigSpec defines the desired state of EtcdadmConfig
type EtcdadmConfigSpec struct {
    // Users specifies extra users to add
    // +optional
    Users []capbk.User `json:"users,omitempty"`

    // +optional
    EtcdadmBuiltin bool `json:"etcdadmBuiltin,omitempty"`

    // +optional
    EtcdadmInstallCommands []string `json:"etcdadmInstallCommands,omitempty"`

    // PreEtcdadmCommands specifies extra commands to run before etcdadm runs
    // +optional
    PreEtcdadmCommands []string `json:"preEtcdadmCommands,omitempty"`

    // PostEtcdadmCommands specifies extra commands to run after etcdadm runs
    // +optional
    PostEtcdadmCommands []string `json:"postEtcdadmCommands,omitempty"`

    // Format specifies the output format of the bootstrap data
    // +optional
    Format Format `json:"format,omitempty"`

    // BottlerocketConfig specifies the configuration for the bottlerocket bootstrap data
    // +optional
    BottlerocketConfig *BottlerocketConfig `json:"bottlerocketConfig,omitempty"`

    // CloudInitConfig specifies the configuration for the cloud-init bootstrap data
    // +optional
    CloudInitConfig *CloudInitConfig `json:"cloudInitConfig,omitempty"`

    // Files specifies extra files to be passed to user_data upon creation.
    // +optional
    Files []capbk.File `json:"files,omitempty"`

    // Proxy holds the https and no proxy information
    // This is only used for bottlerocket
    // +optional
    Proxy *ProxyConfiguration `json:"proxy,omitempty"`

    // RegistryMirror holds the image registry mirror information
    // This is only used for bottlerocket
    // +optional
    RegistryMirror *RegistryMirrorConfiguration `json:"registryMirror,omitempty"`

    // CipherSuites is a list of comma-delimited supported TLS cipher suites, mapping to the --cipher-suites flag.
    // Default is empty, which means that they will be auto-populated by Go.
    // +optional
    CipherSuites string `json:"cipherSuites,omitempty"`

    // NTP specifies NTP configuration
    // +optional
    NTP *capbk.NTP `json:"ntp,omitempty"`

    // CertBundles holds additional cert bundles.
    // +optional
    CertBundles []capbk.CertBundle `json:"certBundles,omitempty"`
}

type BottlerocketConfig struct {
    // EtcdImage specifies the etcd image to be used by etcdadm
    EtcdImage string `json:"etcdImage,omitempty"`

    // BootstrapImage specifies the container image to use for bottlerocket's bootstrapping
    BootstrapImage string `json:"bootstrapImage"`

    // AdminImage specifies the admin container image to use for bottlerocket.
    // +optional
    AdminImage string `json:"adminImage,omitempty"`

    // ControlImage specifies the control container image to use for bottlerocket.
    // +optional
    ControlImage string `json:"controlImage,omitempty"`

    // PauseImage specifies the image to use for the pause container
    PauseImage string `json:"pauseImage"`

    // CustomHostContainers adds additional host containers for bottlerocket.
    // +optional
    CustomHostContainers []BottlerocketHostContainer `json:"customHostContainers,omitempty"`

    // CustomBootstrapContainers adds additional bootstrap containers for bottlerocket.
    // +optional
    CustomBootstrapContainers []BottlerocketBootstrapContainer `json:"customBootstrapContainers,omitempty"`

    // Kernel specifies additional kernel settings for bottlerocket
    Kernel *capbk.BottlerocketKernelSettings `json:"kernel,omitempty"`

    // Boot specifies boot settings for bottlerocket
    Boot *capbk.BottlerocketBootSettings `json:"boot,omitempty"`
}

// BottlerocketHostContainer holds the host container setting for bottlerocket.
type BottlerocketHostContainer struct {
    // Name is the host container name that will be given to the container in BR's `apiserver`
    // +kubebuilder:validation:Required
    Name string `json:"name"`

    // Superpowered indicates if the container will be superpowered
    // +kubebuilder:validation:Required
    Superpowered bool `json:"superpowered"`

    // Image is the actual location of the host container image.
    Image string `json:"image"`

    // UserData is the userdata that will be attached to the image.
    // +optional
    UserData string `json:"userData,omitempty"`
}

// BottlerocketBootstrapContainer holds the bootstrap container setting for bottlerocket.
type BottlerocketBootstrapContainer struct {
    // Name is the bootstrap container name that will be given to the container in BR's `apiserver`.
    // +kubebuilder:validation:Required
    Name string `json:"name"`

    // Image is the actual image used for Bottlerocket bootstrap.
    Image string `json:"image"`

    // Essential decides whether or not the container should fail the boot process.
    // Bootstrap containers configured with essential = true will stop the boot process if their exit code is non-zero.
    // Default is false.
    // +optional
    Essential bool `json:"essential"`

    // Mode represents the bootstrap container mode.
    // +kubebuilder:validation:Enum=always;off;once
    Mode string `json:"mode"`

    // UserData is the base64-encoded userdata.
    // +optional
    UserData string `json:"userData,omitempty"`
}

type CloudInitConfig struct {
    // +optional
    Version string `json:"version,omitempty"`

    // EtcdReleaseURL is an optional field to specify where etcdadm can download etcd from
    // +optional
    EtcdReleaseURL string `json:"etcdReleaseURL,omitempty"`

    // InstallDir is an optional field to specify where etcdadm will extract etcd binaries to
    // +optional
    InstallDir string `json:"installDir,omitempty"`
}

// ProxyConfiguration holds the settings for proxying bottlerocket services
type ProxyConfiguration struct {
    // HTTPProxy is the HTTP proxy to use
    HTTPProxy string `json:"httpProxy,omitempty"`

    // HTTPSProxy is the HTTPS proxy to use
    HTTPSProxy string `json:"httpsProxy,omitempty"`

    // NoProxy is the list of IPs that should not use the proxy
    NoProxy []string `json:"noProxy,omitempty"`
}

// RegistryMirrorConfiguration holds the settings for image registry mirror
type RegistryMirrorConfiguration struct {
    // Endpoint defines the registry mirror endpoint to use for pulling images
    Endpoint string `json:"endpoint,omitempty"`

    // CACert defines the CA cert for the registry mirror
    CACert string `json:"caCert,omitempty"`
}

// EtcdadmConfigStatus defines the observed state of EtcdadmConfig
type EtcdadmConfigStatus struct {
    // Conditions defines current service state of the EtcdadmConfig.
    // +optional
    Conditions clusterv1.Conditions `json:"conditions,omitempty"`

    DataSecretName *string `json:"dataSecretName,omitempty"`

    Ready bool `json:"ready,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status

// EtcdadmConfig is the Schema for the etcdadmconfigs API
type EtcdadmConfig struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   EtcdadmConfigSpec   `json:"spec,omitempty"`
    Status EtcdadmConfigStatus `json:"status,omitempty"`
}

func (e *EtcdadmConfig) GetConditions() clusterv1.Conditions {
    return e.Status.Conditions
}

func (e *EtcdadmConfig) SetConditions(conditions clusterv1.Conditions) {
    e.Status.Conditions = conditions
}

// +kubebuilder:object:root=true

// EtcdadmConfigList contains a list of EtcdadmConfig
type EtcdadmConfigList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []EtcdadmConfig `json:"items"`
}

func init() {
    SchemeBuilder.Register(&EtcdadmConfig{}, &EtcdadmConfigList{})
}
253
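For reference, a minimal, hypothetical example of constructing an EtcdadmConfig from the v1beta1 types above and printing it as YAML; all names and image references are placeholders, and sigs.k8s.io/yaml is assumed to be available.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/yaml"

    etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
)

func main() {
    // Illustrative object only; the image references below are placeholders.
    cfg := etcdbootstrapv1.EtcdadmConfig{
        TypeMeta: metav1.TypeMeta{
            APIVersion: etcdbootstrapv1.GroupVersion.String(),
            Kind:       "EtcdadmConfig",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      "example-etcdadm-config",
            Namespace: "default",
        },
        Spec: etcdbootstrapv1.EtcdadmConfigSpec{
            Format:         etcdbootstrapv1.Bottlerocket,
            EtcdadmBuiltin: true,
            BottlerocketConfig: &etcdbootstrapv1.BottlerocketConfig{
                EtcdImage:      "public.ecr.aws/example/etcd:v3.5.0",
                BootstrapImage: "public.ecr.aws/example/bottlerocket-bootstrap:latest",
                PauseImage:     "public.ecr.aws/example/pause:3.6",
            },
            RegistryMirror: &etcdbootstrapv1.RegistryMirrorConfiguration{
                Endpoint: "registry.example.com:443",
            },
        },
    }

    // Marshal to YAML to see the manifest this object corresponds to.
    out, err := yaml.Marshal(cfg)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}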
etcdadm-bootstrap-provider
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/webhook"
)

// log is for logging in this package.
var etcdadmconfiglog = logf.Log.WithName("etcdadmconfig-resource")

func (r *EtcdadmConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
    return ctrl.NewWebhookManagedBy(mgr).
        For(r).
        Complete()
}

// +kubebuilder:webhook:path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-etcdadmconfig,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=etcdadmconfigs,verbs=create;update,versions=v1beta1,name=metcdadmconfig.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1

var _ webhook.Defaulter = &EtcdadmConfig{}

// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *EtcdadmConfig) Default() {
    etcdadmconfiglog.Info("default", "name", r.Name)

    // TODO(user): fill in your defaulting logic.
}

// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-etcdadmconfig,mutating=false,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=etcdadmconfigs,versions=v1beta1,name=vetcdadmconfig.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1

var _ webhook.Validator = &EtcdadmConfig{}

// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *EtcdadmConfig) ValidateCreate() error {
    etcdadmconfiglog.Info("validate create", "name", r.Name)

    // TODO(user): fill in your validation logic upon object creation.
    return nil
}

// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *EtcdadmConfig) ValidateUpdate(old runtime.Object) error {
    etcdadmconfiglog.Info("validate update", "name", r.Name)

    // TODO(user): fill in your validation logic upon object update.
    return nil
}

// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *EtcdadmConfig) ValidateDelete() error {
    etcdadmconfiglog.Info("validate delete", "name", r.Name)

    // TODO(user): fill in your validation logic upon object deletion.
    return nil
}
74
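A hedged sketch of how SetupWebhookWithManager is typically wired into a manager entrypoint; the manager options and error handling here are illustrative and are not taken from this repository's actual main.go.

package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"

    etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
)

func main() {
    // Illustrative manager bootstrap; most options are omitted for brevity.
    scheme := runtime.NewScheme()
    _ = etcdbootstrapv1.AddToScheme(scheme)

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    // Registers the mutating and validating webhooks declared by the
    // kubebuilder markers in the file above.
    if err := (&etcdbootstrapv1.EtcdadmConfig{}).SetupWebhookWithManager(mgr); err != nil {
        panic(err)
    }

    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        panic(err)
    }
}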
etcdadm-bootstrap-provider
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1beta1 contains API Schema definitions for the bootstrap v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=bootstrap.cluster.x-k8s.io
package v1beta1

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
    // GroupVersion is group version used to register these objects
    GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1beta1"}

    // SchemeBuilder is used to add go types to the GroupVersionKind scheme
    SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

    // AddToScheme adds the types in this group-version to the given scheme.
    AddToScheme = SchemeBuilder.AddToScheme
)
37
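As a usage sketch, AddToScheme is what consumers call so that a controller-runtime client can decode these types; the example below lists EtcdadmConfig objects and assumes cluster access plus the standard client-go and controller-runtime packages.

package main

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
)

func main() {
    // Register core Kubernetes types plus the bootstrap.cluster.x-k8s.io/v1beta1 types.
    scheme := runtime.NewScheme()
    _ = clientgoscheme.AddToScheme(scheme)
    _ = etcdbootstrapv1.AddToScheme(scheme)

    c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    // List EtcdadmConfigs across all namespaces and print their readiness.
    list := &etcdbootstrapv1.EtcdadmConfigList{}
    if err := c.List(context.Background(), list); err != nil {
        panic(err)
    }
    for _, item := range list.Items {
        fmt.Println(item.Namespace, item.Name, item.Status.Ready)
    }
}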
etcdadm-bootstrap-provider
aws
Go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" apiv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BottlerocketBootstrapContainer) DeepCopyInto(out *BottlerocketBootstrapContainer) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketBootstrapContainer. func (in *BottlerocketBootstrapContainer) DeepCopy() *BottlerocketBootstrapContainer { if in == nil { return nil } out := new(BottlerocketBootstrapContainer) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BottlerocketConfig) DeepCopyInto(out *BottlerocketConfig) { *out = *in if in.CustomHostContainers != nil { in, out := &in.CustomHostContainers, &out.CustomHostContainers *out = make([]BottlerocketHostContainer, len(*in)) copy(*out, *in) } if in.CustomBootstrapContainers != nil { in, out := &in.CustomBootstrapContainers, &out.CustomBootstrapContainers *out = make([]BottlerocketBootstrapContainer, len(*in)) copy(*out, *in) } if in.Kernel != nil { in, out := &in.Kernel, &out.Kernel *out = new(apiv1beta1.BottlerocketKernelSettings) (*in).DeepCopyInto(*out) } if in.Boot != nil { in, out := &in.Boot, &out.Boot *out = new(apiv1beta1.BottlerocketBootSettings) (*in).DeepCopyInto(*out) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketConfig. func (in *BottlerocketConfig) DeepCopy() *BottlerocketConfig { if in == nil { return nil } out := new(BottlerocketConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BottlerocketHostContainer) DeepCopyInto(out *BottlerocketHostContainer) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketHostContainer. func (in *BottlerocketHostContainer) DeepCopy() *BottlerocketHostContainer { if in == nil { return nil } out := new(BottlerocketHostContainer) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloudInitConfig) DeepCopyInto(out *CloudInitConfig) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitConfig. func (in *CloudInitConfig) DeepCopy() *CloudInitConfig { if in == nil { return nil } out := new(CloudInitConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EtcdadmConfig) DeepCopyInto(out *EtcdadmConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfig. func (in *EtcdadmConfig) DeepCopy() *EtcdadmConfig { if in == nil { return nil } out := new(EtcdadmConfig) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmConfigList) DeepCopyInto(out *EtcdadmConfigList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EtcdadmConfig, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfigList. func (in *EtcdadmConfigList) DeepCopy() *EtcdadmConfigList { if in == nil { return nil } out := new(EtcdadmConfigList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmConfigList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmConfigSpec) DeepCopyInto(out *EtcdadmConfigSpec) { *out = *in if in.Users != nil { in, out := &in.Users, &out.Users *out = make([]apiv1beta1.User, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.EtcdadmInstallCommands != nil { in, out := &in.EtcdadmInstallCommands, &out.EtcdadmInstallCommands *out = make([]string, len(*in)) copy(*out, *in) } if in.PreEtcdadmCommands != nil { in, out := &in.PreEtcdadmCommands, &out.PreEtcdadmCommands *out = make([]string, len(*in)) copy(*out, *in) } if in.PostEtcdadmCommands != nil { in, out := &in.PostEtcdadmCommands, &out.PostEtcdadmCommands *out = make([]string, len(*in)) copy(*out, *in) } if in.BottlerocketConfig != nil { in, out := &in.BottlerocketConfig, &out.BottlerocketConfig *out = new(BottlerocketConfig) (*in).DeepCopyInto(*out) } if in.CloudInitConfig != nil { in, out := &in.CloudInitConfig, &out.CloudInitConfig *out = new(CloudInitConfig) **out = **in } if in.Files != nil { in, out := &in.Files, &out.Files *out = make([]apiv1beta1.File, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Proxy != nil { in, out := &in.Proxy, &out.Proxy *out = new(ProxyConfiguration) (*in).DeepCopyInto(*out) } if in.RegistryMirror != nil { in, out := &in.RegistryMirror, &out.RegistryMirror *out = new(RegistryMirrorConfiguration) **out = **in } if in.NTP != nil { in, out := &in.NTP, &out.NTP *out = new(apiv1beta1.NTP) (*in).DeepCopyInto(*out) } if in.CertBundles != nil { in, out := &in.CertBundles, &out.CertBundles *out = make([]apiv1beta1.CertBundle, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfigSpec. 
func (in *EtcdadmConfigSpec) DeepCopy() *EtcdadmConfigSpec { if in == nil { return nil } out := new(EtcdadmConfigSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmConfigStatus) DeepCopyInto(out *EtcdadmConfigStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make(cluster_apiapiv1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.DataSecretName != nil { in, out := &in.DataSecretName, &out.DataSecretName *out = new(string) **out = **in } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmConfigStatus. func (in *EtcdadmConfigStatus) DeepCopy() *EtcdadmConfigStatus { if in == nil { return nil } out := new(EtcdadmConfigStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyConfiguration) DeepCopyInto(out *ProxyConfiguration) { *out = *in if in.NoProxy != nil { in, out := &in.NoProxy, &out.NoProxy *out = make([]string, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfiguration. func (in *ProxyConfiguration) DeepCopy() *ProxyConfiguration { if in == nil { return nil } out := new(ProxyConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistryMirrorConfiguration) DeepCopyInto(out *RegistryMirrorConfiguration) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryMirrorConfiguration. func (in *RegistryMirrorConfiguration) DeepCopy() *RegistryMirrorConfiguration { if in == nil { return nil } out := new(RegistryMirrorConfiguration) in.DeepCopyInto(out) return out }
304
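The generated DeepCopy methods matter mostly because objects read from a controller-runtime cache are shared and must not be mutated in place. A small, hypothetical helper showing the copy-before-mutate pattern:

package main

import (
    "fmt"

    etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
)

// markReady deep-copies the input before mutating it, which is the pattern the
// generated DeepCopy methods above exist to support.
func markReady(in *etcdbootstrapv1.EtcdadmConfig) *etcdbootstrapv1.EtcdadmConfig {
    out := in.DeepCopy()
    out.Status.Ready = true
    return out
}

func main() {
    original := &etcdbootstrapv1.EtcdadmConfig{}
    copied := markReady(original)
    fmt.Println(original.Status.Ready, copied.Status.Ready) // false true
}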
etcdadm-bootstrap-provider
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "fmt" "path/filepath" "time" "k8s.io/apimachinery/pkg/types" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/internal/locking" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata/bottlerocket" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata/cloudinit" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" ) const stopKubeletCommand = "systemctl stop kubelet" const registrySecretName = "registry-credentials" const registryUsernameKey = "username" const registryPasswordKey = "password" // InitLocker is a lock that is used around etcdadm init type InitLocker interface { Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool } // TODO: replace with etcdadm release var defaultEtcdadmInstallCommands = []string{`curl -OL https://github.com/mrajashree/etcdadm-bootstrap-provider/releases/download/v0.0.0/etcdadm`, `chmod +x etcdadm`, `mv etcdadm /usr/local/bin/etcdadm`} // EtcdadmConfigReconciler reconciles a EtcdadmConfig object type EtcdadmConfigReconciler struct { client.Client Log logr.Logger Scheme *runtime.Scheme EtcdadmInitLock InitLocker } type Scope struct { logr.Logger Config *etcdbootstrapv1.EtcdadmConfig Cluster *clusterv1.Cluster Machine *clusterv1.Machine } func (r *EtcdadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { if r.EtcdadmInitLock == nil { r.EtcdadmInitLock = locking.NewEtcdadmInitMutex(ctrl.LoggerFrom(ctx).WithName("etcd-init-locker"), mgr.GetClient()) } b := ctrl.NewControllerManagedBy(mgr). For(&etcdbootstrapv1.EtcdadmConfig{}). WithEventFilter(predicates.ResourceNotPaused(r.Log)). 
Watches( &source.Kind{Type: &clusterv1.Machine{}}, handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc), ) c, err := b.Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } err = c.Watch( &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(r.ClusterToEtcdadmConfigs), predicates.ClusterUnpausedAndInfrastructureReady(r.Log), ) if err != nil { return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") } return nil } // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=etcdadmconfigs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=etcdadmconfigs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=configmaps;events;secrets,verbs=get;list;watch;create;update;patch;delete func (r *EtcdadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) { log := r.Log.WithValues("etcdadmconfig", req.Name, "namespace", req.Namespace) // Lookup the etcdadm config etcdadmConfig := &etcdbootstrapv1.EtcdadmConfig{} if err := r.Client.Get(ctx, req.NamespacedName, etcdadmConfig); err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil } log.Error(err, "Failed to get etcdadm config") return ctrl.Result{}, err } // Look up the Machine associated with this EtcdadmConfig resource machine, err := util.GetOwnerMachine(ctx, r.Client, etcdadmConfig.ObjectMeta) if err != nil { if apierrors.IsNotFound(err) { // could not find owning machine, reconcile when owner is set return ctrl.Result{}, nil } log.Error(err, "could not get owner machine for the EtcdadmConfig") return ctrl.Result{}, err } if machine == nil { log.Info("Waiting for Machine Controller to set OwnerRef on the EtcdadmConfig") return ctrl.Result{}, nil } log = log.WithValues("machine-name", machine.Name) // Lookup the cluster the machine is associated with cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) if err != nil { if errors.Cause(err) == util.ErrNoCluster { log.Info("Machine does not belong to a cluster yet, waiting until its part of a cluster") return ctrl.Result{}, nil } if apierrors.IsNotFound(err) { log.Info("Cluster does not exist yet, waiting until it is created") return ctrl.Result{}, nil } log.Error(err, "could not get cluster by machine metadata") return ctrl.Result{}, err } if annotations.IsPaused(cluster, etcdadmConfig) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } // Initialize the patch helper. patchHelper, err := patch.NewHelper(etcdadmConfig, r.Client) if err != nil { return ctrl.Result{}, err } // Attempt to Patch the EtcdadmConfig object and status after each reconciliation if no error occurs. defer func() { // always update the readyCondition; the summary is represented using the "1 of x completed" notation. 
conditions.SetSummary(etcdadmConfig, conditions.WithConditions( etcdbootstrapv1.DataSecretAvailableCondition, ), ) // Patch ObservedGeneration only if the reconciliation completed successfully patchOpts := []patch.Option{} if rerr == nil { patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) } if err := patchHelper.Patch(ctx, etcdadmConfig, patchOpts...); err != nil { log.Error(err, "Failed to patch etcdadmConfig") if rerr == nil { rerr = err } } }() if etcdadmConfig.Status.Ready { return ctrl.Result{}, nil } scope := Scope{ Logger: log, Config: etcdadmConfig, Cluster: cluster, Machine: machine, } if !conditions.IsTrue(cluster, clusterv1.ManagedExternalEtcdClusterInitializedCondition) { return r.initializeEtcd(ctx, &scope) } // Unlock any locks that might have been set during init process r.EtcdadmInitLock.Unlock(ctx, cluster) res, err := r.joinEtcd(ctx, &scope) if err != nil { return res, err } return ctrl.Result{}, nil } func (r *EtcdadmConfigReconciler) initializeEtcd(ctx context.Context, scope *Scope) (_ ctrl.Result, rerr error) { log := r.Log // acquire the init lock so that only the first machine configured as etcd node gets processed here // if not the first, requeue if !r.EtcdadmInitLock.Lock(ctx, scope.Cluster, scope.Machine) { log.Info("An etcd node is already being initialized, requeing until etcd plane is ready") return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } defer func() { if rerr != nil { r.EtcdadmInitLock.Unlock(ctx, scope.Cluster) } }() log.Info("Creating cloudinit for the init etcd plane") CACertKeyPair := etcdCACertKeyPair() rerr = CACertKeyPair.LookupOrGenerate( ctx, r.Client, util.ObjectKey(scope.Cluster), *metav1.NewControllerRef(scope.Config, etcdbootstrapv1.GroupVersion.WithKind("EtcdadmConfig")), ) initInput := userdata.EtcdPlaneInput{ BaseUserData: userdata.BaseUserData{ Users: scope.Config.Spec.Users, PreEtcdadmCommands: scope.Config.Spec.PreEtcdadmCommands, NTP: scope.Config.Spec.NTP, Hostname: scope.Machine.Name, }, Certificates: CACertKeyPair, } // grab user pass for registry mirror if scope.Config.Spec.RegistryMirror != nil { username, password, err := r.resolveRegistryCredentials(ctx, scope.Config) if err != nil { log.Info("Cannot find secret for registry credentials, proceeding without registry credentials") } else { initInput.RegistryMirrorCredentials.Username = string(username) initInput.RegistryMirrorCredentials.Password = string(password) } } // only do this if etcdadm not baked in image if !scope.Config.Spec.EtcdadmBuiltin { if len(scope.Config.Spec.EtcdadmInstallCommands) > 0 { initInput.PreEtcdadmCommands = append(initInput.PreEtcdadmCommands, scope.Config.Spec.EtcdadmInstallCommands...) } else { initInput.PreEtcdadmCommands = append(initInput.PreEtcdadmCommands, defaultEtcdadmInstallCommands...) 
} } var bootstrapData []byte var err error switch scope.Config.Spec.Format { case etcdbootstrapv1.Bottlerocket: bootstrapData, err = bottlerocket.NewInitEtcdPlane(&initInput, scope.Config.Spec, log) default: initInput.PreEtcdadmCommands = append(initInput.PreEtcdadmCommands, stopKubeletCommand) bootstrapData, err = cloudinit.NewInitEtcdPlane(&initInput, scope.Config.Spec) } if err != nil { log.Error(err, "Failed to generate cloud init for initializing etcd plane") return ctrl.Result{}, err } if err := r.storeBootstrapData(ctx, scope.Config, bootstrapData, scope.Cluster.Name); err != nil { log.Error(err, "Failed to store bootstrap data") return ctrl.Result{}, err } return ctrl.Result{}, nil } func (r *EtcdadmConfigReconciler) joinEtcd(ctx context.Context, scope *Scope) (_ ctrl.Result, rerr error) { log := r.Log etcdSecretName := fmt.Sprintf("%v-%v", scope.Cluster.Name, "etcd-init") existingSecret := &corev1.Secret{} if err := r.Client.Get(ctx, client.ObjectKey{Namespace: scope.Cluster.Namespace, Name: etcdSecretName}, existingSecret); err != nil { if apierrors.IsNotFound(err) { // this is not an error, just means the first machine didn't get an address yet, reconcile log.Info("Waiting for Machine Controller to set address on init machine and returning error") return ctrl.Result{}, err } log.Error(err, "Failed to get secret containing first machine address") return ctrl.Result{}, err } log.Info("Machine Controller has set address on init machine") etcdCerts := etcdCACertKeyPair() if err := etcdCerts.Lookup( ctx, r.Client, util.ObjectKey(scope.Cluster), ); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed doing a lookup for certs during join") } var joinAddress string if clientURL, ok := existingSecret.Data["clientUrls"]; ok { joinAddress = string(clientURL) } else { initMachineAddress := string(existingSecret.Data["address"]) joinAddress = fmt.Sprintf("https://%v:2379", initMachineAddress) } joinInput := userdata.EtcdPlaneJoinInput{ BaseUserData: userdata.BaseUserData{ Users: scope.Config.Spec.Users, PreEtcdadmCommands: scope.Config.Spec.PreEtcdadmCommands, NTP: scope.Config.Spec.NTP, Hostname: scope.Machine.Name, }, JoinAddress: joinAddress, Certificates: etcdCerts, } // grab user pass for registry mirror if scope.Config.Spec.RegistryMirror != nil { username, password, err := r.resolveRegistryCredentials(ctx, scope.Config) if err != nil { log.Info("Cannot find secret for registry credentials, proceeding without registry credentials") } else { joinInput.RegistryMirrorCredentials.Username = string(username) joinInput.RegistryMirrorCredentials.Password = string(password) } } if !scope.Config.Spec.EtcdadmBuiltin { if len(scope.Config.Spec.EtcdadmInstallCommands) > 0 { joinInput.PreEtcdadmCommands = append(joinInput.PreEtcdadmCommands, scope.Config.Spec.EtcdadmInstallCommands...) } else { joinInput.PreEtcdadmCommands = append(joinInput.PreEtcdadmCommands, defaultEtcdadmInstallCommands...) 
} } var bootstrapData []byte var err error switch scope.Config.Spec.Format { case etcdbootstrapv1.Bottlerocket: bootstrapData, err = bottlerocket.NewJoinEtcdPlane(&joinInput, scope.Config.Spec, log) default: joinInput.PreEtcdadmCommands = append(joinInput.PreEtcdadmCommands, stopKubeletCommand) bootstrapData, err = cloudinit.NewJoinEtcdPlane(&joinInput, scope.Config.Spec) } if err != nil { log.Error(err, "Failed to generate cloud init for bootstrap etcd plane - join") return ctrl.Result{}, err } if err := r.storeBootstrapData(ctx, scope.Config, bootstrapData, scope.Cluster.Name); err != nil { log.Error(err, "Failed to store bootstrap data - join") return ctrl.Result{}, err } return ctrl.Result{}, nil } func etcdCACertKeyPair() secret.Certificates { certificatesDir := "/etc/etcd/pki" certificates := secret.Certificates{ &secret.Certificate{ Purpose: secret.ManagedExternalEtcdCA, CertFile: filepath.Join(certificatesDir, "ca.crt"), KeyFile: filepath.Join(certificatesDir, "ca.key"), }, } return certificates } // storeBootstrapData creates a new secret with the data passed in as input, // sets the reference in the configuration status and ready to true. func (r *EtcdadmConfigReconciler) storeBootstrapData(ctx context.Context, config *etcdbootstrapv1.EtcdadmConfig, data []byte, clusterName string) error { log := r.Log se := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: config.Name, Namespace: config.Namespace, Labels: map[string]string{ clusterv1.ClusterNameLabel: clusterName, }, OwnerReferences: []metav1.OwnerReference{ { APIVersion: etcdbootstrapv1.GroupVersion.String(), Kind: config.Kind, Name: config.Name, UID: config.UID, Controller: pointer.BoolPtr(true), }, }, }, Data: map[string][]byte{ "value": data, }, Type: clusterv1.ClusterSecretType, } // as secret creation and scope.Config status patch are not atomic operations // it is possible that secret creation happens but the config.Status patches are not applied if err := r.Client.Create(ctx, se); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrapf(err, "failed to create bootstrap data secret for EtcdadmConfig %s/%s", config.Namespace, config.Name) } log.Info("bootstrap data secret for EtcdadmConfig already exists, updating", "secret", se.Name, "EtcdadmConfig", config.Name) if err := r.Client.Update(ctx, se); err != nil { return errors.Wrapf(err, "failed to update bootstrap data secret for EtcdadmConfig %s/%s", config.Namespace, config.Name) } } config.Status.DataSecretName = pointer.StringPtr(se.Name) config.Status.Ready = true conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) return nil } func (r *EtcdadmConfigReconciler) resolveRegistryCredentials(ctx context.Context, config *etcdbootstrapv1.EtcdadmConfig) ([]byte, []byte, error) { secret := &corev1.Secret{} key := types.NamespacedName{Namespace: config.Namespace, Name: registrySecretName} if err := r.Client.Get(ctx, key, secret); err != nil { if apierrors.IsNotFound(err) { return nil, nil, errors.Wrapf(err, "secret not found: %s", key) } return nil, nil, errors.Wrapf(err, "failed to retrieve Secret %q", key) } username, ok := secret.Data[registryUsernameKey] if !ok { return nil, nil, errors.Errorf("secret references non-existent secret key: %q", "username") } password, ok := secret.Data[registryPasswordKey] if !ok { return nil, nil, errors.Errorf("secret references non-existent secret key: %q", "password") } return username, password, nil }
453
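A hedged sketch of registering EtcdadmConfigReconciler with a manager; it assumes the controllers package sits at the module's controllers/ path and leaves EtcdadmInitLock unset so SetupWithManager falls back to its default init mutex.

package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    ctrl "sigs.k8s.io/controller-runtime"

    etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
    "github.com/aws/etcdadm-bootstrap-provider/controllers"
)

func main() {
    // Register the API types the reconciler reads and writes.
    scheme := runtime.NewScheme()
    _ = clientgoscheme.AddToScheme(scheme)
    _ = clusterv1.AddToScheme(scheme)
    _ = etcdbootstrapv1.AddToScheme(scheme)

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    ctx := ctrl.SetupSignalHandler()

    // EtcdadmInitLock is left unset on purpose: SetupWithManager falls back to
    // locking.NewEtcdadmInitMutex when it is nil.
    r := &controllers.EtcdadmConfigReconciler{
        Client: mgr.GetClient(),
        Log:    ctrl.Log.WithName("controllers").WithName("EtcdadmConfig"),
        Scheme: mgr.GetScheme(),
    }
    if err := r.SetupWithManager(ctx, mgr); err != nil {
        panic(err)
    }

    if err := mgr.Start(ctx); err != nil {
        panic(err)
    }
}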
etcdadm-bootstrap-provider
aws
Go
package controllers import ( "context" "fmt" "testing" "time" "k8s.io/utils/pointer" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" ) func setupScheme() *runtime.Scheme { scheme := runtime.NewScheme() if err := clusterv1.AddToScheme(scheme); err != nil { panic(err) } if err := etcdbootstrapv1.AddToScheme(scheme); err != nil { panic(err) } if err := corev1.AddToScheme(scheme); err != nil { panic(err) } return scheme } // MachineToBootstrapMapFunc enqueues EtcdadmConfig objects for reconciliation when associated Machine object is updated func TestEtcdadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") objs := []client.Object{cluster} var expectedConfigName string m := newMachine(cluster, "etcd-machine") configName := "etcdadm-config" c := newEtcdadmConfig(m, configName, etcdbootstrapv1.CloudConfig) objs = append(objs, m, c) expectedConfigName = configName fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objs...).Build() reconciler := &EtcdadmConfigReconciler{ Log: log.Log, Client: fakeClient, } o := m configs := reconciler.MachineToBootstrapMapFunc(o) g.Expect(configs[0].Name).To(Equal(expectedConfigName)) } func TestEtcdadmConfigReconciler_ClusterToEtcdadmConfigs(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") objs := []client.Object{cluster} var expectedConfigName string m := newMachine(cluster, "etcd-machine") configName := "etcdadm-config" c := newEtcdadmConfig(m, configName, etcdbootstrapv1.CloudConfig) objs = append(objs, m, c) expectedConfigName = configName fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objs...).Build() reconciler := &EtcdadmConfigReconciler{ Log: log.Log, Client: fakeClient, } o := cluster configs := reconciler.ClusterToEtcdadmConfigs(o) g.Expect(configs[0].Name).To(Equal(expectedConfigName)) } // Reconcile returns early if the etcdadm config is ready because it should never re-generate bootstrap data. func TestEtcdadmConfigReconciler_Reconcile_ReturnEarlyIfEtcdadmConfigIsReady(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") m := newMachine(cluster, "etcd-machine") config := newEtcdadmConfig(m, "etcdadmConfig", etcdbootstrapv1.CloudConfig) config.Status.Ready = true objects := []client.Object{ cluster, m, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Name: "etcdadmConfig", Namespace: "default", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) } // Reconcile returns an error in this case because the owning machine should not go away before the things it owns. 
func TestEtcdadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFound(t *testing.T) { g := NewWithT(t) machine := newMachine(nil, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ // intentionally omitting machine config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } _, err := k.Reconcile(ctx, request) g.Expect(err).To(BeNil()) } // Return early If the owning machine does not have an associated cluster func TestEtcdadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasNoCluster(t *testing.T) { g := NewWithT(t) machine := newMachine(nil, "machine") // Machine without a cluster (no cluster label present) config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ machine, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } _, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) } // Return early If the owning machine has an associated cluster but it does not exist func TestEtcdadmConfigReconciler_Reconcile_ReturnEarlyIfMachinesClusterDoesNotExist(t *testing.T) { g := NewWithT(t) machine := newMachine(newCluster("external-etcd-cluster"), "machine") // Machine with a cluster label, but cluster won't exist config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ machine, config, // not create cluster object } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } _, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) } // Return early If the associated cluster is paused (typically after clusterctl move) func TestEtcdadmConfigReconciler_Reconcile_ReturnEarlyIfClusterIsPaused(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") cluster.Spec.Paused = true machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ cluster, machine, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } _, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) } // First Etcdadm Machine must initialize cluster since Cluster.Status.ManagedExternalEtcdInitialized is false and lock is not acquired func TestEtcdadmConfigReconciler_InitializeEtcdIfInitLockIsNotAquired_Cloudinit(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ cluster, machine, config, } myclient := 
fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) } // First Etcdadm Machine must initialize cluster since Cluster.Status.ManagedExternalEtcdInitialized is false and lock is not acquired func TestEtcdadmConfigReconciler_InitializeEtcdIfInitLockIsNotAquired_Bottlerocket(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.Bottlerocket) objects := []client.Object{ cluster, machine, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) } // The Secret containing bootstrap data for first etcdadm machine exists, but etcdadmConfig status is not patched yet func TestEtcdadmConfigBootstrapDataSecretCreatedStatusNotPatched(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ cluster, machine, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } dataSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: config.Name, Namespace: config.Namespace, Labels: map[string]string{ clusterv1.ClusterNameLabel: cluster.Name, }, OwnerReferences: []metav1.OwnerReference{ { APIVersion: etcdbootstrapv1.GroupVersion.String(), Kind: config.Kind, Name: config.Name, UID: config.UID, Controller: pointer.BoolPtr(true), }, }, }, Data: map[string][]byte{ "value": nil, }, Type: clusterv1.ClusterSecretType, } err := myclient.Create(ctx, dataSecret) g.Expect(err).ToNot(HaveOccurred()) result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, 
etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) } // NA for EKS-A since it etcdadm is built into the OVA func TestEtcdadmConfigReconciler_PreEtcdadmCommandsWhenEtcdadmNotBuiltin(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) config.Spec.EtcdadmBuiltin = false objects := []client.Object{ cluster, machine, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) } // If Init lock is already acquired but cluster status does not show etcd initialized and another etcdadmConfig is created, requeue it func TestEtcdadmConfigReconciler_RequeueIfInitLockIsAquired(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ cluster, machine, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{locked: true}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).To(BeNil()) } func TestEtcdadmConfigReconciler_JoinMemberInitSecretNotReady(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") cluster.Status.ManagedExternalEtcdInitialized = true conditions.MarkTrue(cluster, clusterv1.ManagedExternalEtcdClusterInitializedCondition) machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) objects := []client.Object{ cluster, machine, config, // not generating etcd init secret, so joinMember won't proceed since etcd cluster is not initialized fully } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } _, err := k.Reconcile(ctx, request) g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(ContainSubstring("not found")) } func TestEtcdadmConfigReconciler_JoinMemberIfEtcdIsInitialized_CloudInit(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") cluster.Status.ManagedExternalEtcdInitialized = true 
conditions.MarkTrue(cluster, clusterv1.ManagedExternalEtcdClusterInitializedCondition) etcdInitSecret := newEtcdInitSecret(cluster) machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) etcdCACerts := etcdCACertKeyPair() etcdCACerts.Generate() etcdCASecret := etcdCACerts[0].AsSecret(client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, *metav1.NewControllerRef(config, etcdbootstrapv1.GroupVersion.WithKind("EtcdadmConfig"))) objects := []client.Object{ cluster, machine, etcdInitSecret, etcdCASecret, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) bootstrapSecret := &corev1.Secret{} err = myclient.Get(ctx, configKey, bootstrapSecret) g.Expect(err).NotTo(HaveOccurred()) g.Expect(bootstrapSecret.Data).To(Not(BeNil())) joinData := string(bootstrapSecret.Data["value"]) g.Expect(joinData).To(ContainSubstring("etcdadm join https://1.2.3.4:2379 --init-system systemd")) } func TestEtcdadmConfigReconciler_JoinMemberIfEtcdIsInitialized_Bottlerocket(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") cluster.Status.ManagedExternalEtcdInitialized = true conditions.MarkTrue(cluster, clusterv1.ManagedExternalEtcdClusterInitializedCondition) etcdInitSecret := newEtcdInitSecret(cluster) machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.Bottlerocket) etcdCACerts := etcdCACertKeyPair() etcdCACerts.Generate() etcdCASecret := etcdCACerts[0].AsSecret(client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, *metav1.NewControllerRef(config, etcdbootstrapv1.GroupVersion.WithKind("EtcdadmConfig"))) objects := []client.Object{ cluster, machine, etcdInitSecret, etcdCASecret, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) bootstrapSecret := &corev1.Secret{} err = myclient.Get(ctx, configKey, bootstrapSecret) g.Expect(err).NotTo(HaveOccurred()) g.Expect(bootstrapSecret.Data).To(Not(BeNil())) } // Older versions of CAPI fork create etcd secret containing only IP address of the first machine func 
TestEtcdadmConfigReconciler_JoinMemberIfEtcdIsInitialized_EtcdInitSecretOlderFormat(t *testing.T) { g := NewWithT(t) cluster := newCluster("external-etcd-cluster") cluster.Status.ManagedExternalEtcdInitialized = true conditions.MarkTrue(cluster, clusterv1.ManagedExternalEtcdClusterInitializedCondition) etcdInitSecret := newEtcdInitSecret(cluster) etcdInitSecret.Data = map[string][]byte{"address": []byte("1.2.3.4")} machine := newMachine(cluster, "machine") config := newEtcdadmConfig(machine, "etcdadmConfig", etcdbootstrapv1.CloudConfig) etcdCACerts := etcdCACertKeyPair() err := etcdCACerts.Generate() g.Expect(err).NotTo(HaveOccurred()) etcdCASecret := etcdCACerts[0].AsSecret(client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, *metav1.NewControllerRef(config, etcdbootstrapv1.GroupVersion.WithKind("EtcdadmConfig"))) objects := []client.Object{ cluster, machine, etcdInitSecret, etcdCASecret, config, } myclient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() k := &EtcdadmConfigReconciler{ Log: log.Log, Client: myclient, EtcdadmInitLock: &etcdInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: "default", Name: "etcdadmConfig", }, } result, err := k.Reconcile(ctx, request) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) c := conditions.Get(config, etcdbootstrapv1.DataSecretAvailableCondition) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) bootstrapSecret := &corev1.Secret{} err = myclient.Get(ctx, configKey, bootstrapSecret) g.Expect(err).NotTo(HaveOccurred()) g.Expect(bootstrapSecret.Data).To(Not(BeNil())) joinData := string(bootstrapSecret.Data["value"]) g.Expect(joinData).To(ContainSubstring("etcdadm join https://1.2.3.4:2379 --init-system systemd")) } // newCluster creates a CAPI Cluster object func newCluster(name string) *clusterv1.Cluster { c := &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: name, }, } return c } // newMachine return a CAPI Machine object; if cluster is not nil, the machine is linked to the cluster as well func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine { machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", APIVersion: clusterv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: name, }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ ConfigRef: &corev1.ObjectReference{ Kind: "EtcdadmConfig", APIVersion: etcdbootstrapv1.GroupVersion.String(), }, }, }, } if cluster != nil { machine.Spec.ClusterName = cluster.Name machine.ObjectMeta.Labels = map[string]string{ clusterv1.ClusterNameLabel: cluster.Name, } } return machine } // newEtcdadmConfig generates an EtcdadmConfig object for the external etcd cluster func newEtcdadmConfig(machine *clusterv1.Machine, name string, format etcdbootstrapv1.Format) *etcdbootstrapv1.EtcdadmConfig { config := &etcdbootstrapv1.EtcdadmConfig{ TypeMeta: metav1.TypeMeta{ Kind: "EtcdadmConfig", APIVersion: etcdbootstrapv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", }, Spec: etcdbootstrapv1.EtcdadmConfigSpec{ Format: format, CloudInitConfig: &etcdbootstrapv1.CloudInitConfig{}, 
EtcdadmBuiltin: true, }, } switch format { case etcdbootstrapv1.Bottlerocket: config.Spec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{} default: config.Spec.CloudInitConfig = &etcdbootstrapv1.CloudInitConfig{} } if machine != nil { config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ { Kind: "Machine", APIVersion: clusterv1.GroupVersion.String(), Name: machine.Name, }, } machine.Spec.Bootstrap.ConfigRef.Name = config.Name machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace } return config } func newEtcdInitSecret(cluster *clusterv1.Cluster) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: fmt.Sprintf("%v-%v", cluster.Name, "etcd-init"), }, Data: map[string][]byte{ "clientUrls": []byte("https://1.2.3.4:2379"), }, } } type etcdInitLocker struct { locked bool } func (m *etcdInitLocker) Lock(_ context.Context, _ *clusterv1.Cluster, _ *clusterv1.Machine) bool { if !m.locked { m.locked = true return true } return false } func (m *etcdInitLocker) Unlock(_ context.Context, _ *clusterv1.Cluster) bool { if m.locked { m.locked = false } return true }
758
etcdadm-bootstrap-provider
aws
Go
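The tests above rebuild the same fake client and reconciler in every case. The following is a minimal sketch of a helper that could collapse that setup; newTestReconciler is a hypothetical name and not part of the original test file (it assumes placement in the same package, reusing the imports already shown).

// newTestReconciler is a hypothetical helper: it builds a reconciler backed by a
// fake client seeded with the given objects, mirroring the setup repeated in each test above.
func newTestReconciler(t *testing.T, objs ...client.Object) *EtcdadmConfigReconciler {
	t.Helper()
	fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objs...).Build()
	return &EtcdadmConfigReconciler{
		Log:             log.Log,
		Client:          fakeClient,
		EtcdadmInitLock: &etcdInitLocker{},
	}
}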
package controllers import ( "context" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/pkg/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) // MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue // requests for reconciliation of EtcdadmConfig. func (r *EtcdadmConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.Request { var result []ctrl.Request m, ok := o.(*clusterv1.Machine) if !ok { r.Log.Error(errors.Errorf("expected a Machine but got a %T", o.GetObjectKind()), "failed to get EtcdadmConfigs for Machine") return nil } if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == etcdbootstrapv1.GroupVersion.WithKind("EtcdadmConfig") { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } return result } // ClusterToEtcdadmConfigs is a handler.ToRequestsFunc to be used to enqeue // requests for reconciliation of EtcdadmConfigs. func (r *EtcdadmConfigReconciler) ClusterToEtcdadmConfigs(o client.Object) []ctrl.Request { var result []ctrl.Request c, ok := o.(*clusterv1.Cluster) if !ok { r.Log.Error(errors.Errorf("expected a Cluster but got a %T", o.GetObjectKind()), "failed to get EtcdadmConfigs for Cluster") return nil } selectors := []client.ListOption{ client.InNamespace(c.Namespace), client.MatchingLabels{ clusterv1.ClusterNameLabel: c.Name, }, } machineList := &clusterv1.MachineList{} if err := r.Client.List(context.Background(), machineList, selectors...); err != nil { r.Log.Error(err, "failed to list Machines", "Cluster", c.Name, "Namespace", c.Namespace) return nil } for _, m := range machineList.Items { if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == etcdbootstrapv1.GroupVersion.WithKind("EtcdadmConfig").GroupKind() { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } } return result }
63
etcdadm-bootstrap-provider
aws
Go
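The two map functions above only take effect once they are registered as watch handlers on the controller. The sketch below is a hypothetical illustration (setupWatchesSketch is not the provider's actual SetupWithManager) and assumes the pre-v0.15 controller-runtime builder API implied by the context-free client.Object signatures above.

package controllers

import (
	etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// setupWatchesSketch shows where the two map functions above would be plugged in.
func setupWatchesSketch(r *EtcdadmConfigReconciler, mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&etcdbootstrapv1.EtcdadmConfig{}).
		// Re-reconcile an EtcdadmConfig whenever its owning Machine changes.
		Watches(&source.Kind{Type: &clusterv1.Machine{}}, handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc)).
		// Re-reconcile all EtcdadmConfigs of a Cluster whenever the Cluster changes.
		Watches(&source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(r.ClusterToEtcdadmConfigs)).
		Complete(r)
}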
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "path/filepath" "testing" bootstrapv1beta1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. var ( cfg *rest.Config ctx = ctrl.SetupSignalHandler() k8sClient client.Client testEnv *envtest.Environment ) func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") } var _ = BeforeSuite(func(done Done) { By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, } var err error cfg, err = testEnv.Start() Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) err = bootstrapv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) close(done) }, 60) var _ = AfterSuite(func() { By("tearing down the test environment") err := testEnv.Stop() Expect(err).ToNot(HaveOccurred()) })
78
etcdadm-bootstrap-provider
aws
Go
package locking import ( "context" "encoding/json" "fmt" "github.com/go-logr/logr" "github.com/pkg/errors" apicorev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" ) const semaphoreInformationKey = "lock-information" // EtcdadmInitMutex uses a ConfigMap to synchronize cluster initialization. type EtcdadmInitMutex struct { log logr.Logger client client.Client } // NewControlPlaneInitMutex returns a lock that can be held by a control plane node before init. func NewEtcdadmInitMutex(log logr.Logger, client client.Client) *EtcdadmInitMutex { return &EtcdadmInitMutex{ log: log, client: client, } } // Lock allows a control plane node to be the first and only node to run kubeadm init func (c *EtcdadmInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool { sema := newSemaphore() cmName := configMapName(cluster.Name) log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName, "machine-name", machine.Name) err := c.client.Get(ctx, client.ObjectKey{ Namespace: cluster.Namespace, Name: cmName, }, sema.ConfigMap) switch { case apierrors.IsNotFound(err): break case err != nil: log.Error(err, "Failed to acquire lock") return false default: // successfully found an existing config map info, err := sema.information() if err != nil { log.Error(err, "Failed to get information about the existing lock") return false } // the machine requesting the lock is the machine that created the lock, therefore the lock is acquired if info.MachineName == machine.Name { return true } log.Info("Waiting on another machine to initialize", "init-machine", info.MachineName) return false } // Adds owner reference, namespace and name sema.setMetadata(cluster) // Adds the additional information if err := sema.setInformation(&information{MachineName: machine.Name}); err != nil { log.Error(err, "Failed to acquire lock while setting semaphore information") return false } log.Info("Attempting to acquire the lock") err = c.client.Create(ctx, sema.ConfigMap) switch { case apierrors.IsAlreadyExists(err): log.Info("Cannot acquire the lock. 
The lock has been acquired by someone else") return false case err != nil: log.Error(err, "Error acquiring the lock") return false default: return true } } // Unlock releases the lock func (c *EtcdadmInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool { sema := newSemaphore() cmName := configMapName(cluster.Name) log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName) log.Info("Checking for lock") err := c.client.Get(ctx, client.ObjectKey{ Namespace: cluster.Namespace, Name: cmName, }, sema.ConfigMap) switch { case apierrors.IsNotFound(err): log.Info("Control plane init lock not found, it may have been released already") return true case err != nil: log.Error(err, "Error unlocking the control plane init lock") return false default: // Delete the config map semaphore if there is no error fetching it if err := c.client.Delete(ctx, sema.ConfigMap); err != nil { if apierrors.IsNotFound(err) { return true } log.Error(err, "Error deleting the config map underlying the control plane init lock") return false } return true } } type information struct { MachineName string `json:"machineName"` } type semaphore struct { *apicorev1.ConfigMap } func newSemaphore() *semaphore { return &semaphore{&apicorev1.ConfigMap{}} } func configMapName(clusterName string) string { return fmt.Sprintf("%s-etcd-lock", clusterName) } func (s semaphore) information() (*information, error) { li := &information{} if err := json.Unmarshal([]byte(s.Data[semaphoreInformationKey]), li); err != nil { return nil, errors.Wrap(err, "failed to unmarshal semaphore information") } return li, nil } func (s semaphore) setInformation(information *information) error { b, err := json.Marshal(information) if err != nil { return errors.Wrap(err, "failed to marshal semaphore information") } s.Data = map[string]string{} s.Data[semaphoreInformationKey] = string(b) return nil } func (s *semaphore) setMetadata(cluster *clusterv1.Cluster) { s.ObjectMeta = metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: configMapName(cluster.Name), Labels: map[string]string{ clusterv1.ClusterNameLabel: cluster.Name, }, OwnerReferences: []metav1.OwnerReference{ { APIVersion: cluster.APIVersion, Kind: cluster.Kind, Name: cluster.Name, UID: cluster.UID, }, }, } }
165
etcdadm-bootstrap-provider
aws
Go
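A minimal sketch of how a reconciler typically consumes this ConfigMap-based mutex; initEtcdOnce and its initFn parameter are hypothetical names used only to illustrate the lock/unlock protocol around first-member initialization.

package locking

import (
	"context"
	"time"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// initEtcdOnce is a hypothetical illustration: acquire the init lock, run the init
// step, and release the lock on failure so another machine can retry.
func initEtcdOnce(ctx context.Context, lock *EtcdadmInitMutex, cluster *clusterv1.Cluster, machine *clusterv1.Machine, initFn func(context.Context) error) (ctrl.Result, error) {
	if !lock.Lock(ctx, cluster, machine) {
		// Another machine holds the init lock; requeue and check again later.
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}
	if err := initFn(ctx); err != nil {
		lock.Unlock(ctx, cluster)
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}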
package userdata import ( "fmt" "strings" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util/secret" ) // EtcdPlaneInput defines the context to generate etcd instance user data for initializing etcd cluster. type EtcdPlaneInput struct { BaseUserData secret.Certificates EtcdadmArgs EtcdadmInitCommand string } // EtcdPlaneJoinInput defines context to generate etcd instance user data for etcd plane node join. type EtcdPlaneJoinInput struct { BaseUserData secret.Certificates EtcdadmArgs EtcdadmJoinCommand string JoinAddress string } // BaseUserData is shared across all the various types of files written to disk. type BaseUserData struct { Header string PreEtcdadmCommands []string PostEtcdadmCommands []string AdditionalFiles []bootstrapv1.File WriteFiles []bootstrapv1.File Users []bootstrapv1.User NTP *bootstrapv1.NTP DiskSetup *bootstrapv1.DiskSetup Mounts []bootstrapv1.MountPoints ControlPlane bool SentinelFileCommand string Hostname string RegistryMirrorCredentials } type EtcdadmArgs struct { Version string ImageRepository string EtcdReleaseURL string InstallDir string CipherSuites string } type RegistryMirrorCredentials struct { Username string Password string } func (args *EtcdadmArgs) SystemdFlags() []string { flags := make([]string, 0, 3) flags = append(flags, "--init-system systemd") if args.Version != "" { flags = append(flags, fmt.Sprintf("--version %s", args.Version)) } if args.ImageRepository != "" { flags = append(flags, fmt.Sprintf("--release-url %s", args.EtcdReleaseURL)) } if args.InstallDir != "" { flags = append(flags, fmt.Sprintf("--install-dir %s", args.InstallDir)) } if args.CipherSuites != "" { flags = append(flags, fmt.Sprintf("--cipher-suites %s", args.CipherSuites)) } return flags } func AddSystemdArgsToCommand(cmd string, args *EtcdadmArgs) string { flags := args.SystemdFlags() fullCommand := make([]string, len(flags)+1) fullCommand = append(fullCommand, cmd) fullCommand = append(fullCommand, flags...) return strings.Join(fullCommand, " ") }
86
etcdadm-bootstrap-provider
aws
Go
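A minimal usage sketch for SystemdFlags with hypothetical values, written as a test that is not part of the original package (it assumes placement in a test file of the userdata package). Only the optional flags whose fields are set are emitted, after the mandatory --init-system flag.

package userdata

import (
	"strings"
	"testing"
)

// TestSystemdFlagsSketch is a hypothetical test, not part of the original package.
func TestSystemdFlagsSketch(t *testing.T) {
	args := EtcdadmArgs{Version: "3.5.9", InstallDir: "/opt/bin/"}
	got := strings.Join(args.SystemdFlags(), " ")
	want := "--init-system systemd --version 3.5.9 --install-dir /opt/bin/"
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}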
package userdata import "strings" func TemplateYAMLIndent(i int, input string) string { split := strings.Split(input, "\n") ident := "\n" + strings.Repeat(" ", i) return strings.Repeat(" ", i) + strings.Join(split, ident) }
10
etcdadm-bootstrap-provider
aws
Go
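A small sketch of what TemplateYAMLIndent produces, again as a hypothetical test in the userdata package: every line of the input gains an i-space prefix so it nests correctly under a YAML key.

package userdata

import "testing"

// TestTemplateYAMLIndentSketch is a hypothetical test, not part of the original package.
func TestTemplateYAMLIndentSketch(t *testing.T) {
	got := TemplateYAMLIndent(4, "key: value\nother: thing")
	want := "    key: value\n    other: thing"
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}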
package bottlerocket const ( filesTemplate = `{{ define "files" -}} write_files:{{ range . }} - path: {{.Path}} {{ if ne .Owner "" -}} owner: {{.Owner}} {{ end -}} {{ if ne .Permissions "" -}} permissions: '{{.Permissions}}' {{ end -}} content: | {{.Content | Indent 6}} {{- end -}} {{- end -}} ` ) const ( usersTemplate = `{{- if . }} { "ssh": { "authorized-keys": [{{.}}] } } {{- end -}} ` )
30
etcdadm-bootstrap-provider
aws
Go
package bottlerocket import ( "fmt" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/go-logr/logr" ) const etcdInitCloudInit = `{{.Header}} {{template "files" .WriteFiles}} - path: /run/cluster-api/placeholder owner: root:root permissions: '0640' content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" runcmd: "{{ .EtcdadmInitCommand }}" ` // NewInitEtcdPlane returns the user data string to be used on a etcd instance. func NewInitEtcdPlane(input *userdata.EtcdPlaneInput, config etcdbootstrapv1.EtcdadmConfigSpec, log logr.Logger) ([]byte, error) { input.WriteFiles = input.Certificates.AsFiles() prepare(&input.BaseUserData) input.EtcdadmArgs = buildEtcdadmArgs(config) logIgnoredFields(&input.BaseUserData, log) input.EtcdadmInitCommand = fmt.Sprintf("EtcdadmInit %s %s %s", input.ImageRepository, input.Version, input.CipherSuites) userData, err := generateUserData("InitEtcdplane", etcdInitCloudInit, input, &input.BaseUserData, config, log) if err != nil { return nil, err } return userData, nil }
34
etcdadm-bootstrap-provider
aws
Go
package bottlerocket import ( "fmt" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/go-logr/logr" "github.com/pkg/errors" ) const ( etcdPlaneJoinCloudInit = `{{.Header}} {{template "files" .WriteFiles}} - path: /run/cluster-api/placeholder owner: root:root permissions: '0640' content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" runcmd: "{{ .EtcdadmJoinCommand }}" ` ) // NewJoinControlPlane returns the user data string to be used on a new control plane instance. func NewJoinEtcdPlane(input *userdata.EtcdPlaneJoinInput, config etcdbootstrapv1.EtcdadmConfigSpec, log logr.Logger) ([]byte, error) { input.WriteFiles = input.Certificates.AsFiles() prepare(&input.BaseUserData) input.EtcdadmArgs = buildEtcdadmArgs(config) logIgnoredFields(&input.BaseUserData, log) input.ControlPlane = true input.EtcdadmJoinCommand = fmt.Sprintf("EtcdadmJoin %s %s %s %s", input.ImageRepository, input.Version, input.CipherSuites, input.JoinAddress) userData, err := generateUserData("JoinControlplane", etcdPlaneJoinCloudInit, input, &input.BaseUserData, config, log) if err != nil { return nil, errors.Wrapf(err, "failed to generate user data for machine joining control plane") } return userData, err }
38
etcdadm-bootstrap-provider
aws
Go
package bottlerocket import ( "bytes" "encoding/base64" "fmt" "strconv" "strings" "text/template" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/go-logr/logr" "github.com/pkg/errors" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) const ( hostContainersTemplate = `{{ define "hostContainersSettings" -}} {{- range .HostContainers }} [settings.host-containers.{{ .Name }}] enabled = true superpowered = {{ .Superpowered }} {{- if .Image }} source = "{{ .Image }}" {{- end }} {{- if .UserData }} user-data = "{{ .UserData }}" {{- end }} {{- end }} {{- end }} ` bootstrapContainersTemplate = `{{ define "bootstrapContainersSettings" -}} {{- range .BootstrapContainers }} [settings.bootstrap-containers.{{ .Name }}] essential = {{ .Essential }} mode = "{{ .Mode }}" {{- if .Image }} source = "{{ .Image }}" {{- end }} {{- if .UserData }} user-data = "{{ .UserData }}" {{- end }} {{- end }} {{- end }} ` kubernetesInitTemplate = `{{ define "kubernetesInitSettings" -}} [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "{{.PauseContainerSource}}" {{- end -}} ` networkInitTemplate = `{{ define "networkInitSettings" -}} [settings.network] hostname = "{{.Hostname}}" {{- if (ne .HTTPSProxyEndpoint "")}} https-proxy = "{{.HTTPSProxyEndpoint}}" no-proxy = [{{stringsJoin .NoProxyEndpoints "," }}] {{- end -}} {{- end -}} ` registryMirrorTemplate = `{{ define "registryMirrorSettings" -}} [settings.container-registry.mirrors] "public.ecr.aws" = ["https://{{.RegistryMirrorEndpoint}}"] {{- end -}} ` registryMirrorCACertTemplate = `{{ define "registryMirrorCACertSettings" -}} [settings.pki.registry-mirror-ca] data = "{{.RegistryMirrorCACert}}" trusted=true {{- end -}} ` registryMirrorCredentialsTemplate = `{{ define "registryMirrorCredentialsSettings" -}} [[settings.container-registry.credentials]] registry = "public.ecr.aws" username = "{{.RegistryMirrorUsername}}" password = "{{.RegistryMirrorPassword}}" [[settings.container-registry.credentials]] registry = "{{.RegistryMirrorEndpoint}}" username = "{{.RegistryMirrorUsername}}" password = "{{.RegistryMirrorPassword}}" {{- end -}} ` ntpTemplate = `{{ define "ntpSettings" -}} [settings.ntp] time-servers = [{{stringsJoin .NTPServers ", " }}] {{- end -}} ` sysctlSettingsTemplate = `{{ define "sysctlSettingsTemplate" -}} [settings.kernel.sysctl] {{.SysctlSettings}} {{- end -}} ` bootSettingsTemplate = `{{ define "bootSettings" -}} [settings.boot] reboot-to-reconcile = true [settings.boot.kernel-parameters] {{.BootKernel}} {{- end -}} ` certsTemplate = `{{ define "certsSettings" -}} [settings.pki.{{.Name}}] data = "{{.Data}}" trusted = true {{- end -}} ` certBundlesSliceTemplate = `{{ define "certBundlesSlice" -}} {{- range $cBundle := .CertBundles }} {{template "certsSettings" $cBundle }} {{- end -}} {{- end -}} ` bottlerocketNodeInitSettingsTemplate = `{{template "hostContainersSettings" .}} {{template "kubernetesInitSettings" .}} {{template "networkInitSettings" .}} {{- if .BootstrapContainers }} {{template "bootstrapContainersSettings" .}} {{- end -}} {{- if (ne .RegistryMirrorEndpoint "")}} {{template "registryMirrorSettings" .}} {{- end -}} {{- if (ne .RegistryMirrorCACert "")}} {{template "registryMirrorCACertSettings" .}} {{- end -}} {{- if and (ne .RegistryMirrorUsername "") (ne .RegistryMirrorPassword "")}} {{template 
"registryMirrorCredentialsSettings" .}} {{- end -}} {{- if .NTPServers}} {{template "ntpSettings" .}} {{- end -}} {{- if (ne .SysctlSettings "")}} {{template "sysctlSettingsTemplate" .}} {{- end -}} {{- if .BootKernel}} {{template "bootSettings" .}} {{- end -}} {{- if .CertBundles}} {{template "certBundlesSlice" .}} {{- end -}} ` ) type bottlerocketSettingsInput struct { PauseContainerSource string HTTPSProxyEndpoint string NoProxyEndpoints []string RegistryMirrorEndpoint string RegistryMirrorCACert string RegistryMirrorUsername string RegistryMirrorPassword string Hostname string HostContainers []etcdbootstrapv1.BottlerocketHostContainer BootstrapContainers []etcdbootstrapv1.BottlerocketBootstrapContainer NTPServers []string SysctlSettings string BootKernel string CertBundles []bootstrapv1.CertBundle } // generateBottlerocketNodeUserData returns the userdata for the host bottlerocket in toml format func generateBottlerocketNodeUserData(kubeadmBootstrapContainerUserData []byte, users []bootstrapv1.User, registryMirrorCredentials userdata.RegistryMirrorCredentials, hostname string, config etcdbootstrapv1.EtcdadmConfigSpec, log logr.Logger) ([]byte, error) { // base64 encode the kubeadm bootstrapContainer's user data b64KubeadmBootstrapContainerUserData := base64.StdEncoding.EncodeToString(kubeadmBootstrapContainerUserData) // Parse out all the ssh authorized keys sshAuthorizedKeys := getAllAuthorizedKeys(users) // generate the userdata for the admin container adminContainerUserData, err := generateAdminContainerUserData("InitAdminContainer", usersTemplate, sshAuthorizedKeys) if err != nil { return nil, err } b64AdminContainerUserData := base64.StdEncoding.EncodeToString(adminContainerUserData) hostContainers := []etcdbootstrapv1.BottlerocketHostContainer{ { Name: "admin", Superpowered: true, Image: config.BottlerocketConfig.AdminImage, UserData: b64AdminContainerUserData, }, { Name: "kubeadm-bootstrap", Superpowered: true, Image: config.BottlerocketConfig.BootstrapImage, UserData: b64KubeadmBootstrapContainerUserData, }, } if config.BottlerocketConfig.ControlImage != "" { hostContainers = append(hostContainers, etcdbootstrapv1.BottlerocketHostContainer{ Name: "control", Superpowered: false, Image: config.BottlerocketConfig.ControlImage, }) } bottlerocketInput := &bottlerocketSettingsInput{ PauseContainerSource: config.BottlerocketConfig.PauseImage, HostContainers: hostContainers, BootstrapContainers: config.BottlerocketConfig.CustomBootstrapContainers, Hostname: hostname, } if config.Proxy != nil { bottlerocketInput.HTTPSProxyEndpoint = config.Proxy.HTTPSProxy for _, noProxy := range config.Proxy.NoProxy { bottlerocketInput.NoProxyEndpoints = append(bottlerocketInput.NoProxyEndpoints, strconv.Quote(noProxy)) } } if config.RegistryMirror != nil { bottlerocketInput.RegistryMirrorEndpoint = config.RegistryMirror.Endpoint if config.RegistryMirror.CACert != "" { bottlerocketInput.RegistryMirrorCACert = base64.StdEncoding.EncodeToString([]byte(config.RegistryMirror.CACert)) } bottlerocketInput.RegistryMirrorUsername = registryMirrorCredentials.Username bottlerocketInput.RegistryMirrorPassword = registryMirrorCredentials.Password } if config.NTP != nil && config.NTP.Enabled != nil && *config.NTP.Enabled { for _, ntpServer := range config.NTP.Servers { bottlerocketInput.NTPServers = append(bottlerocketInput.NTPServers, strconv.Quote(ntpServer)) } } if config.CertBundles != nil { for _, cert := range config.CertBundles { cert.Data = base64.StdEncoding.EncodeToString([]byte(cert.Data)) 
bottlerocketInput.CertBundles = append(bottlerocketInput.CertBundles, cert) } } if config.BottlerocketConfig != nil { if config.BottlerocketConfig.Kernel != nil { bottlerocketInput.SysctlSettings = parseSysctlSettings(config.BottlerocketConfig.Kernel.SysctlSettings) } if config.BottlerocketConfig.Boot != nil { bottlerocketInput.BootKernel = parseBootSettings(config.BottlerocketConfig.Boot.BootKernelParameters) } } bottlerocketNodeUserData, err := generateNodeUserData("InitBottlerocketNode", bottlerocketNodeInitSettingsTemplate, bottlerocketInput) if err != nil { return nil, err } log.Info("Generated bottlerocket bootstrap userdata", "bootstrapContainerImage", config.BottlerocketConfig.BootstrapImage) return bottlerocketNodeUserData, nil } // parseKernelSettings parses through all the the settings and returns a list of the settings. func parseSysctlSettings(sysctlSettings map[string]string) string { sysctlSettingsToml := "" for key, value := range sysctlSettings { sysctlSettingsToml += fmt.Sprintf("\"%s\" = \"%s\"\n", key, value) } return sysctlSettingsToml } // parseBootSettings parses through all the boot settings and returns a list of the settings. func parseBootSettings(bootSettings map[string][]string) string { bootSettingsToml := "" for key, value := range bootSettings { var values []string if len(value) != 0 { for _, val := range value { quotedVal := "\"" + val + "\"" values = append(values, quotedVal) } } keyVal := strings.Join(values, ",") bootSettingsToml += fmt.Sprintf("\"%v\" = [%v]\n", key, keyVal) } return bootSettingsToml } // getAllAuthorizedKeys parses through all the users and return list of all user's authorized ssh keys func getAllAuthorizedKeys(users []bootstrapv1.User) string { var sshAuthorizedKeys []string for _, user := range users { if len(user.SSHAuthorizedKeys) != 0 { for _, key := range user.SSHAuthorizedKeys { quotedKey := "\"" + key + "\"" sshAuthorizedKeys = append(sshAuthorizedKeys, quotedKey) } } } return strings.Join(sshAuthorizedKeys, ",") } func generateAdminContainerUserData(kind string, tpl string, data interface{}) ([]byte, error) { tm := template.New(kind) if _, err := tm.Parse(usersTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse users - %s template", kind) } t, err := tm.Parse(tpl) if err != nil { return nil, errors.Wrapf(err, "failed to parse %s template", kind) } var out bytes.Buffer if err := t.Execute(&out, data); err != nil { return nil, errors.Wrapf(err, "failed to generate %s template", kind) } return out.Bytes(), nil } func generateNodeUserData(kind string, tpl string, data interface{}) ([]byte, error) { tm := template.New(kind).Funcs(template.FuncMap{"stringsJoin": strings.Join}) if _, err := tm.Parse(hostContainersTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse hostContainers %s template", kind) } if _, err := tm.Parse(bootstrapContainersTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse bootstrapContainers %s template", kind) } if _, err := tm.Parse(kubernetesInitTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse kubernetes %s template", kind) } if _, err := tm.Parse(networkInitTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse networks %s template", kind) } if _, err := tm.Parse(registryMirrorTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse registry mirror %s template", kind) } if _, err := tm.Parse(registryMirrorCACertTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse registry mirror ca cert 
%s template", kind) } if _, err := tm.Parse(registryMirrorCredentialsTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse registry mirror credentials %s template", kind) } if _, err := tm.Parse(ntpTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse NTP %s template", kind) } if _, err := tm.Parse(sysctlSettingsTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse sysctl settings %s template", kind) } if _, err := tm.Parse(bootSettingsTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse boot settings %s template", kind) } if _, err := tm.Parse(certsTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse certs %s template", kind) } if _, err := tm.Parse(certBundlesSliceTemplate); err != nil { return nil, errors.Wrapf(err, "failed to parse cert bundles %s template", kind) } t, err := tm.Parse(tpl) if err != nil { return nil, errors.Wrapf(err, "failed to parse %s template", kind) } var out bytes.Buffer if err := t.Execute(&out, data); err != nil { return nil, errors.Wrapf(err, "failed to generate %s template", kind) } return out.Bytes(), nil }
372
etcdadm-bootstrap-provider
aws
Go
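The sysctl and boot-kernel helpers above each render a map into a TOML fragment. Below is a hedged sketch of the fragments they emit, as a hypothetical in-package test; single-key maps are used because Go map iteration order is not deterministic.

package bottlerocket

import "testing"

// TestSettingsParsersSketch is a hypothetical test, not part of the original package.
func TestSettingsParsersSketch(t *testing.T) {
	sysctl := parseSysctlSettings(map[string]string{"net.ipv4.ip_forward": "1"})
	if sysctl != "\"net.ipv4.ip_forward\" = \"1\"\n" {
		t.Fatalf("unexpected sysctl fragment: %q", sysctl)
	}
	boot := parseBootSettings(map[string][]string{"console": {"tty0", "ttyS0,115200"}})
	if boot != "\"console\" = [\"tty0\",\"ttyS0,115200\"]\n" {
		t.Fatalf("unexpected boot fragment: %q", boot)
	}
}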
package bottlerocket import ( "testing" "github.com/go-logr/logr" . "github.com/onsi/gomega" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" ) const ( userDataMinimum = ` [settings.host-containers.admin] enabled = true superpowered = true user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = ""` userDataWithProxyRegistryBootstrapContainers = ` [settings.host-containers.admin] enabled = true superpowered = true source = "custom-admin-image" user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.host-containers.control] enabled = true superpowered = false source = "custom-control-image" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "" https-proxy = "https-proxy" no-proxy = ["no-proxy-1","no-proxy-2"] [settings.bootstrap-containers.custom-bootstrap-1] essential = true mode = "always" source = "custom-bootstrap-image-1" user-data = "abc" [settings.bootstrap-containers.custom-bootstrap-2] essential = false mode = "once" source = "custom-bootstrap-image-2" user-data = "xyz" [settings.container-registry.mirrors] "public.ecr.aws" = ["https://registry-endpoint"] [settings.pki.registry-mirror-ca] data = "Y2FjZXJ0" trusted=true` userDataWithCustomBootstrapContainer = ` [settings.host-containers.admin] enabled = true superpowered = true source = "custom-admin-image" user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.host-containers.control] enabled = true superpowered = false source = "custom-control-image" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "" [settings.bootstrap-containers.custom-bootstrap-1] essential = true mode = "always" source = "custom-bootstrap-image-1" user-data = "abc" [settings.bootstrap-containers.custom-bootstrap-2] essential = false mode = "once" source = "custom-bootstrap-image-2" user-data = "xyz"` userDataWithRegistryAuth = ` [settings.host-containers.admin] enabled = true superpowered = true user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = 
"pause-image" [settings.network] hostname = "" [settings.container-registry.mirrors] "public.ecr.aws" = ["https://registry-endpoint"] [settings.pki.registry-mirror-ca] data = "Y2FjZXJ0" trusted=true [[settings.container-registry.credentials]] registry = "public.ecr.aws" username = "username" password = "password" [[settings.container-registry.credentials]] registry = "registry-endpoint" username = "username" password = "password"` userDataWithNTP = ` [settings.host-containers.admin] enabled = true superpowered = true user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "" [settings.ntp] time-servers = ["1.2.3.4", "time-a.capi.com", "time-b.capi.com"]` userDataWithHostname = ` [settings.host-containers.admin] enabled = true superpowered = true source = "custom-admin-image" user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.host-containers.control] enabled = true superpowered = false source = "custom-control-image" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "hostname" https-proxy = "https-proxy" no-proxy = ["no-proxy-1","no-proxy-2"] [settings.bootstrap-containers.custom-bootstrap-1] essential = true mode = "always" source = "custom-bootstrap-image-1" user-data = "abc" [settings.bootstrap-containers.custom-bootstrap-2] essential = false mode = "once" source = "custom-bootstrap-image-2" user-data = "xyz" [settings.container-registry.mirrors] "public.ecr.aws" = ["https://registry-endpoint"] [settings.pki.registry-mirror-ca] data = "Y2FjZXJ0" trusted=true` userDataWithKernelSettings = ` [settings.host-containers.admin] enabled = true superpowered = true user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "" [settings.kernel.sysctl] "foo" = "bar" "abc" = "def" ` userDataWithBootSettings = ` [settings.host-containers.admin] enabled = true superpowered = true user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "" [settings.kernel.sysctl] "foo" = "bar" "abc" = "def" [settings.boot] reboot-to-reconcile = true [settings.boot.kernel-parameters] "foo" = 
["abc","def,123"] "bar" = [] ` userDataWithCertBundleSettings = ` [settings.host-containers.admin] enabled = true superpowered = true user-data = "CnsKCSJzc2giOiB7CgkJImF1dGhvcml6ZWQta2V5cyI6IFsic3NoLWtleSJdCgl9Cn0=" [settings.host-containers.kubeadm-bootstrap] enabled = true superpowered = true source = "kubeadm-bootstrap-image" user-data = "a3ViZWFkbUJvb3RzdHJhcFVzZXJEYXRh" [settings.kubernetes] cluster-domain = "cluster.local" standalone-mode = true authentication-mode = "tls" server-tls-bootstrap = false pod-infra-container-image = "pause-image" [settings.network] hostname = "" [settings.pki.bundle1] data = "QUJDREVG" trusted = true [settings.pki.bundle2] data = "MTIzNDU2" trusted = true` ) func TestGenerateBottlerocketNodeUserData(t *testing.T) { g := NewWithT(t) trueVal := true testcases := []struct { name string kubeadmBootstrapUserData string hostname string users []bootstrapv1.User registryCredentials userdata.RegistryMirrorCredentials etcdConfig v1beta1.EtcdadmConfigSpec output string }{ { name: "minimum setting", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", PauseImage: "pause-image", }, }, output: userDataMinimum, }, { name: "with custom bootstrap container, with admin and control image", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", AdminImage: "custom-admin-image", ControlImage: "custom-control-image", PauseImage: "pause-image", CustomBootstrapContainers: []v1beta1.BottlerocketBootstrapContainer{ { Name: "custom-bootstrap-1", Image: "custom-bootstrap-image-1", Essential: true, Mode: "always", UserData: "abc", }, { Name: "custom-bootstrap-2", Image: "custom-bootstrap-image-2", Essential: false, Mode: "once", UserData: "xyz", }, }, }, }, output: userDataWithCustomBootstrapContainer, }, { name: "with proxy, registry and custom bootstrap containers", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", AdminImage: "custom-admin-image", ControlImage: "custom-control-image", PauseImage: "pause-image", CustomBootstrapContainers: []v1beta1.BottlerocketBootstrapContainer{ { Name: "custom-bootstrap-1", Image: "custom-bootstrap-image-1", Essential: true, Mode: "always", UserData: "abc", }, { Name: "custom-bootstrap-2", Image: "custom-bootstrap-image-2", Essential: false, Mode: "once", UserData: "xyz", }, }, }, Proxy: &v1beta1.ProxyConfiguration{ HTTPProxy: "http-proxy", HTTPSProxy: "https-proxy", NoProxy: []string{ "no-proxy-1", "no-proxy-2", }, }, RegistryMirror: &v1beta1.RegistryMirrorConfiguration{ Endpoint: "registry-endpoint", CACert: "cacert", }, }, output: userDataWithProxyRegistryBootstrapContainers, }, { name: "with registry with authentication", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, registryCredentials: userdata.RegistryMirrorCredentials{ Username: "username", Password: "password", }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: 
&v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", PauseImage: "pause-image", }, RegistryMirror: &v1beta1.RegistryMirrorConfiguration{ Endpoint: "registry-endpoint", CACert: "cacert", }, }, output: userDataWithRegistryAuth, }, { name: "with NTP config", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", PauseImage: "pause-image", }, NTP: &bootstrapv1.NTP{ Enabled: &trueVal, Servers: []string{ "1.2.3.4", "time-a.capi.com", "time-b.capi.com", }, }, }, output: userDataWithNTP, }, { name: "with proxy, hostname", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", hostname: "hostname", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", AdminImage: "custom-admin-image", ControlImage: "custom-control-image", PauseImage: "pause-image", CustomBootstrapContainers: []v1beta1.BottlerocketBootstrapContainer{ { Name: "custom-bootstrap-1", Image: "custom-bootstrap-image-1", Essential: true, Mode: "always", UserData: "abc", }, { Name: "custom-bootstrap-2", Image: "custom-bootstrap-image-2", Essential: false, Mode: "once", UserData: "xyz", }, }, }, Proxy: &v1beta1.ProxyConfiguration{ HTTPProxy: "http-proxy", HTTPSProxy: "https-proxy", NoProxy: []string{ "no-proxy-1", "no-proxy-2", }, }, RegistryMirror: &v1beta1.RegistryMirrorConfiguration{ Endpoint: "registry-endpoint", CACert: "cacert", }, }, output: userDataWithHostname, }, { name: "with kernel config", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", PauseImage: "pause-image", Kernel: &bootstrapv1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", "abc": "def", }, }, }, }, output: userDataWithKernelSettings, }, { name: "with boot settings config", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", PauseImage: "pause-image", Kernel: &bootstrapv1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", "abc": "def", }, }, Boot: &bootstrapv1.BottlerocketBootSettings{ BootKernelParameters: map[string][]string{ "foo": { "abc", "def,123", }, "bar": {}, }, }, }, }, output: userDataWithBootSettings, }, { name: "with cert bundle settings config", kubeadmBootstrapUserData: "kubeadmBootstrapUserData", users: []bootstrapv1.User{ { SSHAuthorizedKeys: []string{ "ssh-key", }, }, }, etcdConfig: v1beta1.EtcdadmConfigSpec{ CertBundles: []bootstrapv1.CertBundle{ { Name: "bundle1", Data: "ABCDEF", }, { Name: "bundle2", Data: "123456", }, }, BottlerocketConfig: &v1beta1.BottlerocketConfig{ BootstrapImage: "kubeadm-bootstrap-image", PauseImage: "pause-image", }, }, output: userDataWithCertBundleSettings, }, } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { b, err := generateBottlerocketNodeUserData([]byte(testcase.kubeadmBootstrapUserData), testcase.users, testcase.registryCredentials, 
testcase.hostname, testcase.etcdConfig, logr.New(log.NullLogSink{})) g.Expect(err).NotTo(HaveOccurred()) g.Expect(string(b)).To(Equal(testcase.output)) }) } }
615
etcdadm-bootstrap-provider
aws
Go
package bottlerocket import ( "path/filepath" "strings" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/go-logr/logr" ) const ( orgCertsPath = "/etc/etcd/pki" newCertsPath = "/var/lib/etcd/pki" ) func prepare(input *userdata.BaseUserData) { input.Header = cloudConfigHeader input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) input.SentinelFileCommand = sentinelFileCommand patchCertPaths(input) } func patchCertPaths(input *userdata.BaseUserData) { for ind, file := range input.WriteFiles { if filepath.Dir(file.Path) == orgCertsPath { file.Path = filepath.Join(newCertsPath, filepath.Base(file.Path)) } input.WriteFiles[ind] = file } } func buildEtcdadmArgs(config etcdbootstrapv1.EtcdadmConfigSpec) userdata.EtcdadmArgs { repository, tag := splitRepositoryAndTag(config.BottlerocketConfig.EtcdImage) return userdata.EtcdadmArgs{ Version: strings.TrimPrefix(tag, "v"), // trim "v" to get pure simver because that's what etcdadm expects. ImageRepository: repository, CipherSuites: config.CipherSuites, } } func splitRepositoryAndTag(image string) (repository, tag string) { lastInd := strings.LastIndex(image, ":") if lastInd == -1 { return image, "" } if lastInd == len(image)-1 { return image[:lastInd], "" } return image[:lastInd], image[lastInd+1:] } func logIgnoredFields(input *userdata.BaseUserData, log logr.Logger) { if len(input.PreEtcdadmCommands) > 0 { log.Info("Ignoring PreEtcdadmCommands. Not supported with bottlerocket") } if len(input.PostEtcdadmCommands) > 0 { log.Info("Ignoring PostEtcdadmCommands. Not supported with bottlerocket") } if input.DiskSetup != nil { log.Info("Ignoring DiskSetup. Not supported with bottlerocket") } if len(input.Mounts) > 0 { log.Info("Ignoring Mounts. Not supported with bottlerocket") } }
69
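A minimal, self-contained sketch of how the splitRepositoryAndTag helper in the file above splits an image reference, and how the leading "v" is trimmed before the tag is handed to etcdadm. The image strings are made-up examples, and the helper is re-implemented locally so the sketch compiles on its own.

// Sketch (assumed example images): splitting an image reference into repository and tag,
// mirroring splitRepositoryAndTag from the bottlerocket userdata helpers above.
package main

import (
	"fmt"
	"strings"
)

func splitRepositoryAndTag(image string) (repository, tag string) {
	lastInd := strings.LastIndex(image, ":")
	if lastInd == -1 {
		return image, ""
	}
	if lastInd == len(image)-1 {
		return image[:lastInd], ""
	}
	return image[:lastInd], image[lastInd+1:]
}

func main() {
	for _, image := range []string{
		"public.ecr.aws/eks-distro/etcd-io/etcd:v3.5.9-eks-1-28-1", // repository + tag
		"public.ecr.aws/eks-distro/etcd-io/etcd",                  // no tag at all
		"registry.internal/etcd:",                                 // trailing colon, empty tag
	} {
		repo, tag := splitRepositoryAndTag(image)
		// etcdadm expects a bare semver, so the leading "v" is trimmed from the tag.
		fmt.Printf("repo=%s tag=%s version=%s\n", repo, tag, strings.TrimPrefix(tag, "v"))
	}
}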
etcdadm-bootstrap-provider
aws
Go
package bottlerocket import ( "bytes" "text/template" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/go-logr/logr" "github.com/pkg/errors" ) const ( cloudConfigHeader = `## template: jinja #cloud-config ` // sentinelFileCommand writes a file to /run/cluster-api to signal successful Kubernetes bootstrapping in a way that // works both for Linux and Windows OS. sentinelFileCommand = "echo success > /run/cluster-api/bootstrap-success.complete" ) var defaultTemplateFuncMap = template.FuncMap{ "Indent": userdata.TemplateYAMLIndent, } func generateUserData(kind string, tpl string, data interface{}, input *userdata.BaseUserData, config etcdbootstrapv1.EtcdadmConfigSpec, log logr.Logger) ([]byte, error) { bootstrapContainerUserData, err := generateBootstrapContainerUserData(kind, tpl, data) if err != nil { return nil, err } return generateBottlerocketNodeUserData(bootstrapContainerUserData, input.Users, input.RegistryMirrorCredentials, input.Hostname, config, log) } func generateBootstrapContainerUserData(kind string, tpl string, data interface{}) ([]byte, error) { tm := template.New(kind).Funcs(defaultTemplateFuncMap) if _, err := tm.Parse(filesTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse files template") } t, err := tm.Parse(tpl) if err != nil { return nil, errors.Wrapf(err, "failed to parse %s template", kind) } var out bytes.Buffer if err := t.Execute(&out, data); err != nil { return nil, errors.Wrapf(err, "failed to generate %s template", kind) } return out.Bytes(), nil }
53
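A short sketch of the template-composition pattern used by generateBootstrapContainerUserData above: a named "files" sub-template is registered first, then the kind-specific top-level template is parsed onto the same template set. The template bodies and data types here are simplified stand-ins, not the provider's real templates.

// Sketch: layering a named sub-template under a top-level template with text/template.
package main

import (
	"fmt"
	"os"
	"text/template"
)

const filesTemplate = `{{ define "files" -}}
write_files:{{ range . }}
- path: {{ .Path }}
{{- end }}
{{- end }}`

const kindTemplate = `#cloud-config
{{ template "files" .WriteFiles }}
runcmd:
  - {{ .Command }}
`

type file struct{ Path string }

type input struct {
	WriteFiles []file
	Command    string
}

func main() {
	tm := template.Must(template.New("EtcdPlane").Parse(filesTemplate)) // register the "files" sub-template
	t := template.Must(tm.Parse(kindTemplate))                          // then parse the top-level body
	if err := t.Execute(os.Stdout, input{
		WriteFiles: []file{{Path: "/etc/etcd/pki/ca.crt"}, {Path: "/etc/etcd/pki/ca.key"}},
		Command:    "etcdadm init",
	}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}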
etcdadm-bootstrap-provider
aws
Go
package cloudinit import ( "bytes" "fmt" "strings" "text/template" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/pkg/errors" capbk "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) const ( standardInitCommand = "etcdadm init" standardJoinCommand = "etcdadm join %s" // sentinelFileCommand writes a file to /run/cluster-api to signal successful Kubernetes bootstrapping in a way that // works both for Linux and Windows OS. sentinelFileCommand = "echo success > /run/cluster-api/bootstrap-success.complete" cloudConfigHeader = `## template: jinja #cloud-config ` proxyConf = ` [Service] Environment="HTTP_PROXY={{.HTTPProxy}}" Environment="HTTPS_PROXY={{.HTTPSProxy}}" Environment="NO_PROXY={{ stringsJoin .NoProxy "," }}" ` registryMirrorConf = ` [plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"] endpoint = ["https://{{.Endpoint}}"] [plugins."io.containerd.grpc.v1.cri".registry.configs."{{.Endpoint}}".tls] {{- if not .CACert }} insecure_skip_verify = true {{- else }} ca_file = "/etc/containerd/certs.d/{{.Endpoint}}/ca.crt" {{- end }} ` ) var containerdRestart = []string{"sudo systemctl daemon-reload", "sudo systemctl restart containerd"} var defaultTemplateFuncMap = template.FuncMap{ "Indent": userdata.TemplateYAMLIndent, } func generate(kind string, tpl string, data interface{}) ([]byte, error) { tm := template.New(kind).Funcs(defaultTemplateFuncMap) if _, err := tm.Parse(filesTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse files template") } if _, err := tm.Parse(commandsTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse commands template") } if _, err := tm.Parse(ntpTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse ntp template") } if _, err := tm.Parse(usersTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse users template") } if _, err := tm.Parse(diskSetupTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse disk setup template") } if _, err := tm.Parse(fsSetupTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse fs setup template") } if _, err := tm.Parse(mountsTemplate); err != nil { return nil, errors.Wrap(err, "failed to parse mounts template") } t, err := tm.Parse(tpl) if err != nil { return nil, errors.Wrapf(err, "failed to parse %s template", kind) } var out bytes.Buffer if err := t.Execute(&out, data); err != nil { return nil, errors.Wrapf(err, "failed to generate %s template", kind) } return out.Bytes(), nil } func prepare(input *userdata.BaseUserData) error { input.Header = cloudConfigHeader input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) 
input.SentinelFileCommand = sentinelFileCommand return nil } func buildEtcdadmArgs(config etcdbootstrapv1.EtcdadmConfigSpec) userdata.EtcdadmArgs { return userdata.EtcdadmArgs{ Version: config.CloudInitConfig.Version, EtcdReleaseURL: config.CloudInitConfig.EtcdReleaseURL, InstallDir: config.CloudInitConfig.InstallDir, CipherSuites: config.CipherSuites, } } func setProxy(proxy *etcdbootstrapv1.ProxyConfiguration, input *userdata.BaseUserData) error { if proxy == nil { return nil } tmpl := template.New("proxy").Funcs(template.FuncMap{"stringsJoin": strings.Join}) t, err := tmpl.Parse(proxyConf) if err != nil { return fmt.Errorf("failed to parse proxy template: %v", err) } var out bytes.Buffer if err = t.Execute(&out, proxy); err != nil { return fmt.Errorf("error generating proxy config file: %v", err) } input.AdditionalFiles = append(input.AdditionalFiles, capbk.File{ Content: out.String(), Owner: "root:root", Path: "/etc/systemd/system/containerd.service.d/http-proxy.conf", }) input.PreEtcdadmCommands = append(input.PreEtcdadmCommands, containerdRestart...) return nil } func setRegistryMirror(registryMirror *etcdbootstrapv1.RegistryMirrorConfiguration, input *userdata.BaseUserData) error { if registryMirror == nil { return nil } tmpl := template.New("registryMirror") t, err := tmpl.Parse(registryMirrorConf) if err != nil { return fmt.Errorf("failed to parse registryMirror template: %v", err) } var out bytes.Buffer if err = t.Execute(&out, registryMirror); err != nil { return fmt.Errorf("error generating registryMirror config file: %v", err) } input.AdditionalFiles = append(input.AdditionalFiles, capbk.File{ Content: registryMirror.CACert, Owner: "root:root", Path: fmt.Sprintf("/etc/containerd/certs.d/%s/ca.crt", registryMirror.Endpoint), }, capbk.File{ Content: out.String(), Owner: "root:root", Path: "/etc/containerd/config_append.toml", }, ) input.PreEtcdadmCommands = append(input.PreEtcdadmCommands, `cat /etc/containerd/config_append.toml >> /etc/containerd/config.toml`) input.PreEtcdadmCommands = append(input.PreEtcdadmCommands, containerdRestart...) return nil }
165
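A self-contained sketch of what setProxy in the file above renders: the systemd drop-in for containerd built from the proxyConf template with a stringsJoin template function. The template body is a local copy (without the leading blank line) and the proxy endpoints are made up for illustration.

// Sketch: rendering the containerd http-proxy drop-in the way setProxy does.
package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

const proxyConf = `[Service]
Environment="HTTP_PROXY={{.HTTPProxy}}"
Environment="HTTPS_PROXY={{.HTTPSProxy}}"
Environment="NO_PROXY={{ stringsJoin .NoProxy "," }}"
`

type proxy struct {
	HTTPProxy  string
	HTTPSProxy string
	NoProxy    []string
}

func main() {
	t := template.Must(template.New("proxy").
		Funcs(template.FuncMap{"stringsJoin": strings.Join}).
		Parse(proxyConf))
	// The provider writes the rendered content to
	// /etc/systemd/system/containerd.service.d/http-proxy.conf and then restarts containerd.
	if err := t.Execute(os.Stdout, proxy{
		HTTPProxy:  "http://proxy.internal:3128",
		HTTPSProxy: "http://proxy.internal:3128",
		NoProxy:    []string{"10.0.0.0/8", ".cluster.local"},
	}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}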
etcdadm-bootstrap-provider
aws
Go
package cloudinit const ( filesTemplate = `{{ define "files" -}} write_files:{{ range . }} - path: {{.Path}} {{ if ne .Encoding "" -}} encoding: "{{.Encoding}}" {{ end -}} {{ if ne .Owner "" -}} owner: {{.Owner}} {{ end -}} {{ if ne .Permissions "" -}} permissions: '{{.Permissions}}' {{ end -}} content: | {{.Content | Indent 6}} {{- end -}} {{- end -}} ` ) const ( commandsTemplate = `{{- define "commands" -}} {{ range . }} - {{printf "%q" .}} {{- end -}} {{- end -}} ` ) const ( ntpTemplate = `{{ define "ntp" -}} {{- if . }} ntp: {{ if .Enabled -}} enabled: true {{ end -}} servers:{{ range .Servers }} - {{ . }} {{- end -}} {{- end -}} {{- end -}} ` ) const ( usersTemplate = `{{ define "users" -}} {{- if . }} users:{{ range . }} - name: {{ .Name }} {{- if .Passwd }} passwd: {{ .Passwd }} {{- end -}} {{- if .Gecos }} gecos: {{ .Gecos }} {{- end -}} {{- if .Groups }} groups: {{ .Groups }} {{- end -}} {{- if .HomeDir }} homedir: {{ .HomeDir }} {{- end -}} {{- if .Inactive }} inactive: true {{- end -}} {{- if .LockPassword }} lock_passwd: {{ .LockPassword }} {{- end -}} {{- if .Shell }} shell: {{ .Shell }} {{- end -}} {{- if .PrimaryGroup }} primary_group: {{ .PrimaryGroup }} {{- end -}} {{- if .Sudo }} sudo: {{ .Sudo }} {{- end -}} {{- if .SSHAuthorizedKeys }} ssh_authorized_keys:{{ range .SSHAuthorizedKeys }} - {{ . }} {{- end -}} {{- end -}} {{- end -}} {{- end -}} {{- end -}} ` ) const ( diskSetupTemplate = `{{ define "disk_setup" -}} {{- if . }} disk_setup:{{ range .Partitions }} {{ .Device }}: {{- if .TableType }} table_type: {{ .TableType }} {{- end }} layout: {{ .Layout }} {{- if .Overwrite }} overwrite: {{ .Overwrite }} {{- end -}} {{- end -}} {{- end -}} {{- end -}} ` ) const ( fsSetupTemplate = `{{ define "fs_setup" -}} {{- if . }} fs_setup:{{ range .Filesystems }} - label: {{ .Label }} filesystem: {{ .Filesystem }} device: {{ .Device }} {{- if .Partition }} partition: {{ .Partition }} {{- end }} {{- if .Overwrite }} overwrite: {{ .Overwrite }} {{- end }} {{- if .ReplaceFS }} replace_fs: {{ .ReplaceFS }} {{- end }} {{- if .ExtraOpts }} extra_opts: {{ range .ExtraOpts }} - {{ . }} {{- end -}} {{- end -}} {{- end -}} {{- end -}} {{- end -}} ` ) const ( mountsTemplate = `{{ define "mounts" -}} {{- if . }} mounts:{{ range . }} - {{ range . }}- {{ . }} {{ end -}} {{- end -}} {{- end -}} {{- end -}} ` )
146
etcdadm-bootstrap-provider
aws
Go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cloudinit import ( etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/pkg/errors" ) const ( etcdPlaneCloudInit = `{{.Header}} {{template "files" .WriteFiles}} - path: /run/cluster-api/placeholder owner: root:root permissions: '0640' content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" runcmd: {{- template "commands" .PreEtcdadmCommands }} - {{ .EtcdadmInitCommand }} && {{ .SentinelFileCommand }} {{- template "commands" .PostEtcdadmCommands }} {{- template "ntp" .NTP }} {{- template "users" .Users }} {{- template "disk_setup" .DiskSetup}} {{- template "fs_setup" .DiskSetup}} {{- template "mounts" .Mounts}} ` ) // NewInitEtcdPlane returns the user data string to be used on a etcd instance. func NewInitEtcdPlane(input *userdata.EtcdPlaneInput, config etcdbootstrapv1.EtcdadmConfigSpec) ([]byte, error) { input.WriteFiles = input.Certificates.AsFiles() input.EtcdadmArgs = buildEtcdadmArgs(config) input.EtcdadmInitCommand = userdata.AddSystemdArgsToCommand(standardInitCommand, &input.EtcdadmArgs) if err := setProxy(config.Proxy, &input.BaseUserData); err != nil { return nil, err } if err := setRegistryMirror(config.RegistryMirror, &input.BaseUserData); err != nil { return nil, err } if err := prepare(&input.BaseUserData); err != nil { return nil, err } userData, err := generate("InitEtcdCluster", etcdPlaneCloudInit, input) if err != nil { return nil, errors.Wrapf(err, "failed to generate user data for machine initializing etcd cluster") } return userData, nil }
65
etcdadm-bootstrap-provider
aws
Go
package cloudinit import ( "fmt" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "github.com/aws/etcdadm-bootstrap-provider/pkg/userdata" "github.com/pkg/errors" ) const ( etcdPlaneJoinCloudInit = `{{.Header}} {{template "files" .WriteFiles}} - path: /run/cluster-api/placeholder owner: root:root permissions: '0640' content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" runcmd: {{- template "commands" .PreEtcdadmCommands }} - {{ .EtcdadmJoinCommand }} && {{ .SentinelFileCommand }} {{- template "commands" .PostEtcdadmCommands }} {{- template "ntp" .NTP }} {{- template "users" .Users }} {{- template "disk_setup" .DiskSetup}} {{- template "fs_setup" .DiskSetup}} {{- template "mounts" .Mounts}} ` ) // NewJoinEtcdPlane returns the user data string to be used on a machine joining an existing etcd cluster. func NewJoinEtcdPlane(input *userdata.EtcdPlaneJoinInput, config etcdbootstrapv1.EtcdadmConfigSpec) ([]byte, error) { input.WriteFiles = input.Certificates.AsFiles() input.EtcdadmArgs = buildEtcdadmArgs(config) input.EtcdadmJoinCommand = userdata.AddSystemdArgsToCommand(fmt.Sprintf(standardJoinCommand, input.JoinAddress), &input.EtcdadmArgs) if err := setProxy(config.Proxy, &input.BaseUserData); err != nil { return nil, err } if err := setRegistryMirror(config.RegistryMirror, &input.BaseUserData); err != nil { return nil, err } if err := prepare(&input.BaseUserData); err != nil { return nil, err } userData, err := generate("JoinEtcdCluster", etcdPlaneJoinCloudInit, input) if err != nil { return nil, errors.Wrapf(err, "failed to generate user data for machine joining etcd cluster") } return userData, err }
51
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "context" "flag" "os" "os/signal" "syscall" etcdbp "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log/zap" etcdclusterv1alpha3 "github.com/aws/etcdadm-controller/api/v1alpha3" etcdclusterv1beta1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/aws/etcdadm-controller/controllers" // +kubebuilder:scaffold:imports ) var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") watchNamespace string ) func init() { _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) _ = etcdbp.AddToScheme(scheme) _ = etcdclusterv1alpha3.AddToScheme(scheme) _ = etcdclusterv1beta1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } func main() { var metricsAddr string var enableLeaderElection bool var maxConcurrentReconciles int flag.StringVar(&metricsAddr, "metrics-addr", "localhost:8080", "The address the metric endpoint binds to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") flag.StringVar(&watchNamespace, "namespace", "", "Namespace that the controller watches to reconcile etcdadmCluster objects. If unspecified, the controller watches for objects across all namespaces.") flag.IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 10, "The maximum number of concurrent etcdadm-controller reconciles.") flag.Parse() ctrl.SetLogger(zap.New(zap.UseDevMode(true))) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, Port: 9443, LeaderElection: enableLeaderElection, LeaderElectionID: "cc88008e.cluster.x-k8s.io", Namespace: watchNamespace, }) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } // Setup the context that's going to be used in controllers and for the manager. 
	ctx, stopCh := setupSignalHandler()

	etcdadmReconciler := &controllers.EtcdadmClusterReconciler{
		Client:                  mgr.GetClient(),
		Log:                     ctrl.Log.WithName("controllers").WithName("EtcdadmCluster"),
		Scheme:                  mgr.GetScheme(),
		MaxConcurrentReconciles: maxConcurrentReconciles,
	}
	if err = (etcdadmReconciler).SetupWithManager(ctx, mgr, stopCh); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "EtcdadmCluster")
		os.Exit(1)
	}
	if err = (&etcdclusterv1beta1.EtcdadmCluster{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "EtcdadmCluster")
		os.Exit(1)
	}
	// +kubebuilder:scaffold:builder

	setupLog.Info("starting manager")
	if err := mgr.Start(ctx); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}

var onlyOneSignalHandler = make(chan struct{})

var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

/*
Controller-runtime 0.5.4 returns a stop channel, while 0.7.0 onwards returns a context that can be
passed down to SetupWithManager and the reconcilers. Because cluster-api v0.3.x uses
controller-runtime 0.5.4, etcdadm-controller cannot switch to a higher controller-runtime version
without hitting version mismatch errors. So setupSignalHandler is a modified version of
controller-runtime's SetupSignalHandler that returns both a stop channel and a context that is
cancelled when this controller exits.
*/
func setupSignalHandler() (context.Context, <-chan struct{}) {
	close(onlyOneSignalHandler) // panics when called twice

	ctx, cancel := context.WithCancel(context.Background())
	stop := make(chan struct{})
	c := make(chan os.Signal, 2)
	signal.Notify(c, shutdownSignals...)
	go func() {
		<-c
		cancel()
		close(stop)
		<-c
		os.Exit(1) // second signal. Exit directly.
	}()

	return ctx, stop
}
136
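A minimal, standard-library-only sketch of the dual context/stop-channel pattern described in the comment above setupSignalHandler: the first SIGINT/SIGTERM cancels a context and closes a stop channel, the second exits immediately. The helper and worker names here are illustrative, not taken from the controller.

// Sketch: serving both channel-based and context-based consumers from one signal handler.
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func signalAwareContext() (context.Context, <-chan struct{}) {
	ctx, cancel := context.WithCancel(context.Background())
	stop := make(chan struct{})
	c := make(chan os.Signal, 2)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c
		cancel()
		close(stop)
		<-c
		os.Exit(1) // second signal: exit without waiting for cleanup
	}()
	return ctx, stop
}

func main() {
	ctx, stop := signalAwareContext()
	go func() {
		<-stop // components built against older controller-runtime take a channel
		fmt.Println("stop channel closed")
	}()
	<-ctx.Done() // components built against newer controller-runtime take a context
	time.Sleep(100 * time.Millisecond)
}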
etcdadm-controller
aws
Go
package v1alpha3 import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" const ( // EtcdMachinesSpecUpToDateCondition documents that the spec of the machines controlled by the EtcdadmCluster // is up to date. When this condition is false, the EtcdadmCluster is executing a rolling upgrade. EtcdMachinesSpecUpToDateCondition clusterv1.ConditionType = "EtcdMachinesSpecUpToDate" // EtcdRollingUpdateInProgressReason (Severity=Warning) documents an EtcdadmCluster object executing a // rolling upgrade for aligning the machines spec to the desired state. EtcdRollingUpdateInProgressReason = "EtcdRollingUpdateInProgress" // EtcdCertificatesAvailableCondition indicates that the etcdadm controller has generated the etcd certs to be used by new members // joining the etcd cluster, and to be used by the controlplane EtcdCertificatesAvailableCondition clusterv1.ConditionType = "EtcdCertificatesAvailable" // EtcdClusterResizeCompleted indicates if cluster is finished with scale up/down or is being resized EtcdClusterResizeCompleted clusterv1.ConditionType = "EtcdClusterResizeCompleted" // EtcdScaleUpInProgressReason indicates scale up is in progress EtcdScaleUpInProgressReason = "ScalingUp" // EtcdScaleDownInProgressReason indicates scale down is in progress EtcdScaleDownInProgressReason = "ScalingDown" // InitializedCondition shows if etcd cluster has been initialized, which is when the first etcd member has been initialized InitializedCondition clusterv1.ConditionType = "Initialized" // WaitingForEtcdadmInitReason shows that the first etcd member has not been created yet WaitingForEtcdadmInitReason = "WaitingForEtcdadmInit" // EtcdMachinesReadyCondition stores an aggregate status of all owned machines EtcdMachinesReadyCondition clusterv1.ConditionType = "EtcdMachinesReady" // EtcdClusterHasNoOutdatedMembersCondition indicates that all etcd members are up-to-date. NOTE: this includes even members present on Machines not owned by the // etcdadm cluster EtcdClusterHasNoOutdatedMembersCondition clusterv1.ConditionType = "EtcdClusterHasNoOutdatedMachines" // EtcdClusterHasOutdatedMembersReason shows that some of the etcd members are out-of-date EtcdClusterHasOutdatedMembersReason = "EtcdClusterHasOutdatedMachines" )
43
etcdadm-controller
aws
Go
package v1alpha3

import (
	etcdv1beta1 "github.com/aws/etcdadm-controller/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// ConvertTo converts this EtcdadmCluster to the Hub version (v1beta1).
func (src *EtcdadmCluster) ConvertTo(dstRaw conversion.Hub) error { // nolint
	dst := dstRaw.(*etcdv1beta1.EtcdadmCluster)
	if err := Convert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster(src, dst, nil); err != nil {
		return err
	}
	return nil
}

// ConvertFrom converts from the Hub version (v1beta1) to this EtcdadmCluster.
func (dst *EtcdadmCluster) ConvertFrom(srcRaw conversion.Hub) error { // nolint
	src := srcRaw.(*etcdv1beta1.EtcdadmCluster)
	return Convert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster(src, dst, nil)
}

// ConvertTo converts this EtcdadmClusterList to the Hub version (v1beta1).
func (src *EtcdadmClusterList) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*etcdv1beta1.EtcdadmClusterList)
	if err := Convert_v1alpha3_EtcdadmClusterList_To_v1beta1_EtcdadmClusterList(src, dst, nil); err != nil {
		return err
	}
	return nil
}

// ConvertFrom converts from the Hub version (v1beta1) to this EtcdadmClusterList.
func (dst *EtcdadmClusterList) ConvertFrom(srcRaw conversion.Hub) error { // nolint
	src := srcRaw.(*etcdv1beta1.EtcdadmClusterList)
	return Convert_v1beta1_EtcdadmClusterList_To_v1alpha3_EtcdadmClusterList(src, dst, nil)
}
37
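A small sketch of how the spoke/hub wiring above is usually checked: compile-time assertions that the v1alpha3 types satisfy controller-runtime's Convertible interface and that the v1beta1 types are hubs. Placing this in a _test.go file of the v1alpha3 package is an assumed convention for illustration, not something taken from the repo.

// Sketch: compile-time checks of the spoke (v1alpha3) / hub (v1beta1) conversion wiring.
package v1alpha3

import (
	etcdv1beta1 "github.com/aws/etcdadm-controller/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

var (
	// Spokes must implement Convertible (ConvertTo/ConvertFrom against the hub).
	_ conversion.Convertible = &EtcdadmCluster{}
	_ conversion.Convertible = &EtcdadmClusterList{}

	// Hubs only need the Hub() marker method (see the v1beta1 hub.go later in this dump).
	_ conversion.Hub = &etcdv1beta1.EtcdadmCluster{}
	_ conversion.Hub = &etcdv1beta1.EtcdadmClusterList{}
)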
etcdadm-controller
aws
Go
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha3 contains API Schema definitions for the etcd cluster v1alpha3 API group
// +kubebuilder:object:generate=true
// +groupName=etcdcluster.cluster.x-k8s.io
// +k8s:conversion-gen=github.com/aws/etcdadm-controller/api/v1beta1
package v1alpha3
22
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 import ( etcdbp "github.com/aws/etcdadm-bootstrap-provider/api/v1alpha3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" ) const ( UpgradeInProgressAnnotation = "etcdcluster.cluster.x-k8s.io/upgrading" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // EtcdadmClusterSpec defines the desired state of EtcdadmCluster type EtcdadmClusterSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file Replicas *int32 `json:"replicas,omitempty"` // InfrastructureTemplate is a required reference to a custom resource // offered by an infrastructure provider. InfrastructureTemplate corev1.ObjectReference `json:"infrastructureTemplate"` // +optional EtcdadmConfigSpec etcdbp.EtcdadmConfigSpec `json:"etcdadmConfigSpec"` } // EtcdadmClusterStatus defines the observed state of EtcdadmCluster type EtcdadmClusterStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file // Total number of non-terminated machines targeted by this etcd cluster // (their labels match the selector). // +optional ReadyReplicas int32 `json:"replicas,omitempty"` // +optional InitMachineAddress string `json:"initMachineAddress"` // +optional Initialized bool `json:"initialized"` // Ready reflects the state of the etcd cluster, whether all of its members have passed healthcheck and are ready to serve requests or not. // +optional Ready bool `json:"ready"` // CreationComplete gets set to true once the etcd cluster is created. Its value never changes after that. // It is used as a way to indicate that the periodic healthcheck loop can be run for the particular etcd cluster. // +optional CreationComplete bool `json:"creationComplete"` // +optional Endpoints string `json:"endpoints"` // Selector is the label selector in string format to avoid introspection // by clients, and is used to provide the CRD-based integration for the // scale subresource and additional integrations for things like kubectl // describe.. The string will be in the same format as the query-param syntax. // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional Selector string `json:"selector,omitempty"` // ObservedGeneration is the latest generation observed by the controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Conditions defines current service state of the EtcdadmCluster. 
// +optional Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // EtcdadmCluster is the Schema for the etcdadmclusters API type EtcdadmCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec EtcdadmClusterSpec `json:"spec,omitempty"` Status EtcdadmClusterStatus `json:"status,omitempty"` } func (in *EtcdadmCluster) GetConditions() clusterv1.Conditions { return in.Status.Conditions } func (in *EtcdadmCluster) SetConditions(conditions clusterv1.Conditions) { in.Status.Conditions = conditions } // +kubebuilder:object:root=true // EtcdadmClusterList contains a list of EtcdadmCluster type EtcdadmClusterList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []EtcdadmCluster `json:"items"` } func init() { SchemeBuilder.Register(&EtcdadmCluster{}, &EtcdadmClusterList{}) }
124
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package v1alpha3 contains API Schema definitions for the etcdcluster v1alpha3 API group // +kubebuilder:object:generate=true // +groupName=etcdcluster.cluster.x-k8s.io package v1alpha3 import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects GroupVersion = schema.GroupVersion{Group: "etcdcluster.cluster.x-k8s.io", Version: "v1alpha3"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme // localSchemeBuilder is used for type conversions. localSchemeBuilder = SchemeBuilder.SchemeBuilder )
40
etcdadm-controller
aws
Go
//go:build !ignore_autogenerated_etcd_cluster // +build !ignore_autogenerated_etcd_cluster /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by conversion-gen. DO NOT EDIT. package v1alpha3 import ( unsafe "unsafe" apiv1alpha3 "github.com/aws/etcdadm-bootstrap-provider/api/v1alpha3" v1beta1 "github.com/aws/etcdadm-controller/api/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" clusterapiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" ) func init() { localSchemeBuilder.Register(RegisterConversions) } // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { if err := s.AddGeneratedConversionFunc((*EtcdadmCluster)(nil), (*v1beta1.EtcdadmCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster(a.(*EtcdadmCluster), b.(*v1beta1.EtcdadmCluster), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmCluster)(nil), (*EtcdadmCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster(a.(*v1beta1.EtcdadmCluster), b.(*EtcdadmCluster), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmClusterList)(nil), (*v1beta1.EtcdadmClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmClusterList_To_v1beta1_EtcdadmClusterList(a.(*EtcdadmClusterList), b.(*v1beta1.EtcdadmClusterList), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmClusterList)(nil), (*EtcdadmClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmClusterList_To_v1alpha3_EtcdadmClusterList(a.(*v1beta1.EtcdadmClusterList), b.(*EtcdadmClusterList), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmClusterSpec)(nil), (*v1beta1.EtcdadmClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmClusterSpec_To_v1beta1_EtcdadmClusterSpec(a.(*EtcdadmClusterSpec), b.(*v1beta1.EtcdadmClusterSpec), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*v1beta1.EtcdadmClusterSpec)(nil), (*EtcdadmClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmClusterSpec_To_v1alpha3_EtcdadmClusterSpec(a.(*v1beta1.EtcdadmClusterSpec), b.(*EtcdadmClusterSpec), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*EtcdadmClusterStatus)(nil), (*v1beta1.EtcdadmClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_EtcdadmClusterStatus_To_v1beta1_EtcdadmClusterStatus(a.(*EtcdadmClusterStatus), b.(*v1beta1.EtcdadmClusterStatus), scope) }); err != nil { return err } if err := 
s.AddGeneratedConversionFunc((*v1beta1.EtcdadmClusterStatus)(nil), (*EtcdadmClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EtcdadmClusterStatus_To_v1alpha3_EtcdadmClusterStatus(a.(*v1beta1.EtcdadmClusterStatus), b.(*EtcdadmClusterStatus), scope) }); err != nil { return err } return nil } func autoConvert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster(in *EtcdadmCluster, out *v1beta1.EtcdadmCluster, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_EtcdadmClusterSpec_To_v1beta1_EtcdadmClusterSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1alpha3_EtcdadmClusterStatus_To_v1beta1_EtcdadmClusterStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster(in *EtcdadmCluster, out *v1beta1.EtcdadmCluster, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster(in, out, s) } func autoConvert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster(in *v1beta1.EtcdadmCluster, out *EtcdadmCluster, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_EtcdadmClusterSpec_To_v1alpha3_EtcdadmClusterSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1beta1_EtcdadmClusterStatus_To_v1alpha3_EtcdadmClusterStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster is an autogenerated conversion function. func Convert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster(in *v1beta1.EtcdadmCluster, out *EtcdadmCluster, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster(in, out, s) } func autoConvert_v1alpha3_EtcdadmClusterList_To_v1beta1_EtcdadmClusterList(in *EtcdadmClusterList, out *v1beta1.EtcdadmClusterList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.EtcdadmCluster, len(*in)) for i := range *in { if err := Convert_v1alpha3_EtcdadmCluster_To_v1beta1_EtcdadmCluster(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // Convert_v1alpha3_EtcdadmClusterList_To_v1beta1_EtcdadmClusterList is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmClusterList_To_v1beta1_EtcdadmClusterList(in *EtcdadmClusterList, out *v1beta1.EtcdadmClusterList, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmClusterList_To_v1beta1_EtcdadmClusterList(in, out, s) } func autoConvert_v1beta1_EtcdadmClusterList_To_v1alpha3_EtcdadmClusterList(in *v1beta1.EtcdadmClusterList, out *EtcdadmClusterList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EtcdadmCluster, len(*in)) for i := range *in { if err := Convert_v1beta1_EtcdadmCluster_To_v1alpha3_EtcdadmCluster(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // Convert_v1beta1_EtcdadmClusterList_To_v1alpha3_EtcdadmClusterList is an autogenerated conversion function. 
func Convert_v1beta1_EtcdadmClusterList_To_v1alpha3_EtcdadmClusterList(in *v1beta1.EtcdadmClusterList, out *EtcdadmClusterList, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmClusterList_To_v1alpha3_EtcdadmClusterList(in, out, s) } func autoConvert_v1alpha3_EtcdadmClusterSpec_To_v1beta1_EtcdadmClusterSpec(in *EtcdadmClusterSpec, out *v1beta1.EtcdadmClusterSpec, s conversion.Scope) error { out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) out.InfrastructureTemplate = in.InfrastructureTemplate if err := apiv1alpha3.Convert_v1alpha3_EtcdadmConfigSpec_To_v1beta1_EtcdadmConfigSpec(&in.EtcdadmConfigSpec, &out.EtcdadmConfigSpec, s); err != nil { return err } return nil } // Convert_v1alpha3_EtcdadmClusterSpec_To_v1beta1_EtcdadmClusterSpec is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmClusterSpec_To_v1beta1_EtcdadmClusterSpec(in *EtcdadmClusterSpec, out *v1beta1.EtcdadmClusterSpec, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmClusterSpec_To_v1beta1_EtcdadmClusterSpec(in, out, s) } func autoConvert_v1beta1_EtcdadmClusterSpec_To_v1alpha3_EtcdadmClusterSpec(in *v1beta1.EtcdadmClusterSpec, out *EtcdadmClusterSpec, s conversion.Scope) error { out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) out.InfrastructureTemplate = in.InfrastructureTemplate if err := apiv1alpha3.Convert_v1beta1_EtcdadmConfigSpec_To_v1alpha3_EtcdadmConfigSpec(&in.EtcdadmConfigSpec, &out.EtcdadmConfigSpec, s); err != nil { return err } return nil } // Convert_v1beta1_EtcdadmClusterSpec_To_v1alpha3_EtcdadmClusterSpec is an autogenerated conversion function. func Convert_v1beta1_EtcdadmClusterSpec_To_v1alpha3_EtcdadmClusterSpec(in *v1beta1.EtcdadmClusterSpec, out *EtcdadmClusterSpec, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmClusterSpec_To_v1alpha3_EtcdadmClusterSpec(in, out, s) } func autoConvert_v1alpha3_EtcdadmClusterStatus_To_v1beta1_EtcdadmClusterStatus(in *EtcdadmClusterStatus, out *v1beta1.EtcdadmClusterStatus, s conversion.Scope) error { out.ReadyReplicas = in.ReadyReplicas out.InitMachineAddress = in.InitMachineAddress out.Initialized = in.Initialized out.Ready = in.Ready out.CreationComplete = in.CreationComplete out.Endpoints = in.Endpoints out.Selector = in.Selector out.ObservedGeneration = in.ObservedGeneration out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_v1alpha3_EtcdadmClusterStatus_To_v1beta1_EtcdadmClusterStatus is an autogenerated conversion function. func Convert_v1alpha3_EtcdadmClusterStatus_To_v1beta1_EtcdadmClusterStatus(in *EtcdadmClusterStatus, out *v1beta1.EtcdadmClusterStatus, s conversion.Scope) error { return autoConvert_v1alpha3_EtcdadmClusterStatus_To_v1beta1_EtcdadmClusterStatus(in, out, s) } func autoConvert_v1beta1_EtcdadmClusterStatus_To_v1alpha3_EtcdadmClusterStatus(in *v1beta1.EtcdadmClusterStatus, out *EtcdadmClusterStatus, s conversion.Scope) error { out.ReadyReplicas = in.ReadyReplicas out.InitMachineAddress = in.InitMachineAddress out.Initialized = in.Initialized out.Ready = in.Ready out.CreationComplete = in.CreationComplete out.Endpoints = in.Endpoints out.Selector = in.Selector out.ObservedGeneration = in.ObservedGeneration out.Conditions = *(*clusterapiapiv1alpha3.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_v1beta1_EtcdadmClusterStatus_To_v1alpha3_EtcdadmClusterStatus is an autogenerated conversion function. 
func Convert_v1beta1_EtcdadmClusterStatus_To_v1alpha3_EtcdadmClusterStatus(in *v1beta1.EtcdadmClusterStatus, out *EtcdadmClusterStatus, s conversion.Scope) error { return autoConvert_v1beta1_EtcdadmClusterStatus_To_v1alpha3_EtcdadmClusterStatus(in, out, s) }
221
etcdadm-controller
aws
Go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1alpha3 import ( "k8s.io/apimachinery/pkg/runtime" apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmCluster) DeepCopyInto(out *EtcdadmCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmCluster. func (in *EtcdadmCluster) DeepCopy() *EtcdadmCluster { if in == nil { return nil } out := new(EtcdadmCluster) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmCluster) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmClusterList) DeepCopyInto(out *EtcdadmClusterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EtcdadmCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmClusterList. func (in *EtcdadmClusterList) DeepCopy() *EtcdadmClusterList { if in == nil { return nil } out := new(EtcdadmClusterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmClusterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmClusterSpec) DeepCopyInto(out *EtcdadmClusterSpec) { *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in } out.InfrastructureTemplate = in.InfrastructureTemplate in.EtcdadmConfigSpec.DeepCopyInto(&out.EtcdadmConfigSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmClusterSpec. func (in *EtcdadmClusterSpec) DeepCopy() *EtcdadmClusterSpec { if in == nil { return nil } out := new(EtcdadmClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EtcdadmClusterStatus) DeepCopyInto(out *EtcdadmClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make(apiv1alpha3.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmClusterStatus. func (in *EtcdadmClusterStatus) DeepCopy() *EtcdadmClusterStatus { if in == nil { return nil } out := new(EtcdadmClusterStatus) in.DeepCopyInto(out) return out }
131
etcdadm-controller
aws
Go
package v1beta1 import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" const ( // EtcdMachinesSpecUpToDateCondition documents that the spec of the machines controlled by the EtcdadmCluster // is up to date. When this condition is false, the EtcdadmCluster is executing a rolling upgrade. EtcdMachinesSpecUpToDateCondition clusterv1.ConditionType = "EtcdMachinesSpecUpToDate" // EtcdRollingUpdateInProgressReason (Severity=Warning) documents an EtcdadmCluster object executing a // rolling upgrade for aligning the machines spec to the desired state. EtcdRollingUpdateInProgressReason = "EtcdRollingUpdateInProgress" // EtcdCertificatesAvailableCondition indicates that the etcdadm controller has generated the etcd certs to be used by new members // joining the etcd cluster, and to be used by the controlplane EtcdCertificatesAvailableCondition clusterv1.ConditionType = "EtcdCertificatesAvailable" // EtcdClusterResizeCompleted indicates if cluster is finished with scale up/down or is being resized EtcdClusterResizeCompleted clusterv1.ConditionType = "EtcdClusterResizeCompleted" // EtcdScaleUpInProgressReason indicates scale up is in progress EtcdScaleUpInProgressReason = "ScalingUp" // EtcdScaleDownInProgressReason indicates scale down is in progress EtcdScaleDownInProgressReason = "ScalingDown" // InitializedCondition shows if etcd cluster has been initialized, which is when the first etcd member has been initialized InitializedCondition clusterv1.ConditionType = "Initialized" // WaitingForEtcdadmInitReason shows that the first etcd member has not been created yet WaitingForEtcdadmInitReason = "WaitingForEtcdadmInit" // EtcdMachinesReadyCondition stores an aggregate status of all owned machines EtcdMachinesReadyCondition clusterv1.ConditionType = "EtcdMachinesReady" // EtcdClusterHasNoOutdatedMembersCondition indicates that all etcd members are up-to-date. NOTE: this includes even members present on Machines not owned by the // etcdadm cluster EtcdClusterHasNoOutdatedMembersCondition clusterv1.ConditionType = "EtcdClusterHasNoOutdatedMachines" // EtcdClusterHasOutdatedMembersReason shows that some of the etcd members are out-of-date EtcdClusterHasOutdatedMembersReason = "EtcdClusterHasOutdatedMachines" // EtcdEndpointsAvailable shows that all endpoints of the etcd cluster passed healthcheck and are available EtcdEndpointsAvailable = "EtcdEndpointsAvailable" // WaitingForEtcdadmEndpointsToPassHealthcheckReason shows that some of the etcd members are not ready yet WaitingForEtcdadmEndpointsToPassHealthcheckReason = "WaitingForEtcdadmEndpointsToPassHealthcheck" )
49
etcdadm-controller
aws
Go
package v1beta1

// Hub marks EtcdadmCluster as a conversion hub.
func (*EtcdadmCluster) Hub() {}

// Hub marks EtcdadmClusterList as a conversion hub.
func (*EtcdadmClusterList) Hub() {}
8
etcdadm-controller
aws
Go
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1beta1 contains API Schema definitions for the etcd cluster v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=etcdcluster.cluster.x-k8s.io
package v1beta1
21
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( etcdbp "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) const ( UpgradeInProgressAnnotation = "etcdcluster.cluster.x-k8s.io/upgrading" // EtcdadmClusterFinalizer is the finalizer applied to EtcdadmCluster resources // by its managing controller. EtcdadmClusterFinalizer = "etcdcluster.cluster.x-k8s.io" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // EtcdadmClusterSpec defines the desired state of EtcdadmCluster type EtcdadmClusterSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file Replicas *int32 `json:"replicas,omitempty"` // InfrastructureTemplate is a required reference to a custom resource // offered by an infrastructure provider. InfrastructureTemplate corev1.ObjectReference `json:"infrastructureTemplate"` // +optional EtcdadmConfigSpec etcdbp.EtcdadmConfigSpec `json:"etcdadmConfigSpec"` } // EtcdadmClusterStatus defines the observed state of EtcdadmCluster type EtcdadmClusterStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file // Total number of non-terminated machines targeted by this etcd cluster // (their labels match the selector). // +optional ReadyReplicas int32 `json:"replicas,omitempty"` // +optional InitMachineAddress string `json:"initMachineAddress"` // +optional Initialized bool `json:"initialized"` // Ready reflects the state of the etcd cluster, whether all of its members have passed healthcheck and are ready to serve requests or not. // +optional Ready bool `json:"ready"` // CreationComplete gets set to true once the etcd cluster is created. Its value never changes after that. // It is used as a way to indicate that the periodic healthcheck loop can be run for the particular etcd cluster. // +optional CreationComplete bool `json:"creationComplete"` // +optional Endpoints string `json:"endpoints"` // Selector is the label selector in string format to avoid introspection // by clients, and is used to provide the CRD-based integration for the // scale subresource and additional integrations for things like kubectl // describe.. The string will be in the same format as the query-param syntax. // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional Selector string `json:"selector,omitempty"` // ObservedGeneration is the latest generation observed by the controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Conditions defines current service state of the EtcdadmCluster. 
// +optional Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion // EtcdadmCluster is the Schema for the etcdadmclusters API type EtcdadmCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec EtcdadmClusterSpec `json:"spec,omitempty"` Status EtcdadmClusterStatus `json:"status,omitempty"` } func (in *EtcdadmCluster) GetConditions() clusterv1.Conditions { return in.Status.Conditions } func (in *EtcdadmCluster) SetConditions(conditions clusterv1.Conditions) { in.Status.Conditions = conditions } // +kubebuilder:object:root=true // EtcdadmClusterList contains a list of EtcdadmCluster type EtcdadmClusterList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []EtcdadmCluster `json:"items"` } func init() { SchemeBuilder.Register(&EtcdadmCluster{}, &EtcdadmClusterList{}) }
130
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" ) // log is for logging in this package. var etcdadmclusterlog = logf.Log.WithName("etcdadmcluster-resource") func (r *EtcdadmCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). Complete() } // +kubebuilder:webhook:verbs=create;update,path=/mutate-etcdcluster-cluster-x-k8s-io-v1beta1-etcdadmcluster,mutating=true,failurePolicy=fail,groups=etcdcluster.cluster.x-k8s.io,resources=etcdadmclusters,versions=v1beta1,name=metcdadmcluster.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.Defaulter = &EtcdadmCluster{} // +kubebuilder:webhook:verbs=create;update,path=/validate-etcdcluster-cluster-x-k8s-io-v1beta1-etcdadmcluster,mutating=false,failurePolicy=fail,groups=etcdcluster.cluster.x-k8s.io,resources=etcdadmclusters,versions=v1beta1,name=vetcdadmcluster.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.Validator = &EtcdadmCluster{} // Default implements webhook.Defaulter so a webhook will be registered for the type func (r *EtcdadmCluster) Default() { etcdadmclusterlog.Info("default", "name", r.Name) if r.Spec.Replicas == nil { replicas := int32(1) r.Spec.Replicas = &replicas } if r.Spec.InfrastructureTemplate.Namespace == "" { r.Spec.InfrastructureTemplate.Namespace = r.Namespace } } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type func (r *EtcdadmCluster) ValidateCreate() error { etcdadmclusterlog.Info("validate create", "name", r.Name) allErrs := r.validateCommon() if len(allErrs) > 0 { return apierrors.NewInvalid(GroupVersion.WithKind("EtcdadmCluster").GroupKind(), r.Name, allErrs) } return nil } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type func (r *EtcdadmCluster) ValidateUpdate(old runtime.Object) error { etcdadmclusterlog.Info("validate update", "name", r.Name) oldEtcdadmCluster, ok := old.(*EtcdadmCluster) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected an EtcdadmCluster object but got a %T", old)) } if *oldEtcdadmCluster.Spec.Replicas != *r.Spec.Replicas { return field.Invalid(field.NewPath("spec", "replicas"), r.Spec.Replicas, "field is immutable") } allErrs := r.validateCommon() if len(allErrs) > 0 { return apierrors.NewInvalid(GroupVersion.WithKind("EtcdadmCluster").GroupKind(), r.Name, allErrs) } return nil } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type func (r *EtcdadmCluster) ValidateDelete() error { etcdadmclusterlog.Info("validate delete", "name", r.Name) // TODO(user): fill in your validation logic upon object deletion. 
return nil } func (r *EtcdadmCluster) validateCommon() (allErrs field.ErrorList) { if r.Spec.Replicas == nil { allErrs = append( allErrs, field.Required( field.NewPath("spec", "replicas"), "is required", ), ) } else if *r.Spec.Replicas <= 0 { allErrs = append( allErrs, field.Forbidden( field.NewPath("spec", "replicas"), "cannot be less than or equal to 0", ), ) } else if r.Spec.Replicas != nil && *r.Spec.Replicas%2 == 0 { allErrs = append( allErrs, field.Forbidden( field.NewPath("spec", "replicas"), "etcd cluster cannot have an even number of nodes", ), ) } if r.Spec.InfrastructureTemplate.Namespace != r.Namespace { allErrs = append( allErrs, field.Invalid( field.NewPath("spec", "infrastructureTemplate", "namespace"), r.Spec.InfrastructureTemplate.Namespace, "must match metadata.namespace", ), ) } return allErrs }
140
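A hedged sketch of how the replica rules in validateCommon/ValidateCreate above behave: an odd, positive replica count passes, while even or non-positive counts are rejected. It assumes placement in a _test.go file of the v1beta1 package, and the cluster names and the omitted namespaces are illustrative choices (leaving both namespaces empty keeps the infrastructureTemplate namespace check satisfied).

// Sketch: exercising the EtcdadmCluster replica validation rules.
package v1beta1

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func replicas(n int32) *int32 { return &n }

func ExampleEtcdadmCluster_ValidateCreate() {
	for _, c := range []*EtcdadmCluster{
		{ObjectMeta: metav1.ObjectMeta{Name: "odd"}, Spec: EtcdadmClusterSpec{Replicas: replicas(3)}},
		{ObjectMeta: metav1.ObjectMeta{Name: "even"}, Spec: EtcdadmClusterSpec{Replicas: replicas(4)}},
		{ObjectMeta: metav1.ObjectMeta{Name: "zero"}, Spec: EtcdadmClusterSpec{Replicas: replicas(0)}},
	} {
		err := c.ValidateCreate()
		fmt.Println(c.Name, "valid:", err == nil)
	}
	// Expectation: only the 3-replica cluster passes; an even count and a
	// non-positive count are both rejected by validateCommon.
}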
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package v1beta1 contains API Schema definitions for the etcdcluster v1beta1 API group // +kubebuilder:object:generate=true // +groupName=etcdcluster.cluster.x-k8s.io package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects GroupVersion = schema.GroupVersion{Group: "etcdcluster.cluster.x-k8s.io", Version: "v1beta1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme )
37
etcdadm-controller
aws
Go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmCluster) DeepCopyInto(out *EtcdadmCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmCluster. func (in *EtcdadmCluster) DeepCopy() *EtcdadmCluster { if in == nil { return nil } out := new(EtcdadmCluster) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmCluster) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmClusterList) DeepCopyInto(out *EtcdadmClusterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EtcdadmCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmClusterList. func (in *EtcdadmClusterList) DeepCopy() *EtcdadmClusterList { if in == nil { return nil } out := new(EtcdadmClusterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EtcdadmClusterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdadmClusterSpec) DeepCopyInto(out *EtcdadmClusterSpec) { *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in } out.InfrastructureTemplate = in.InfrastructureTemplate in.EtcdadmConfigSpec.DeepCopyInto(&out.EtcdadmConfigSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmClusterSpec. func (in *EtcdadmClusterSpec) DeepCopy() *EtcdadmClusterSpec { if in == nil { return nil } out := new(EtcdadmClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EtcdadmClusterStatus) DeepCopyInto(out *EtcdadmClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make(apiv1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmClusterStatus. func (in *EtcdadmClusterStatus) DeepCopy() *EtcdadmClusterStatus { if in == nil { return nil } out := new(EtcdadmClusterStatus) in.DeepCopyInto(out) return out }
131
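The generated DeepCopy helpers above matter mostly when working against a cached controller-runtime client, where returned objects are shared with the informer cache. The function below is a hedged sketch of that pattern, not code from this repository; its name is made up, and the etcdv1, clusterv1, and client import aliases are assumed to match those used elsewhere in this dump.

func pauseEtcdadmCluster(ctx context.Context, c client.Client, key client.ObjectKey) error {
    cached := &etcdv1.EtcdadmCluster{}
    if err := c.Get(ctx, key, cached); err != nil {
        return err
    }

    // Mutate a deep copy: objects returned by a cached client are shared with the
    // informer cache, so editing `cached` in place could corrupt that cache.
    updated := cached.DeepCopy()
    if updated.Annotations == nil {
        updated.Annotations = map[string]string{}
    }
    updated.Annotations[clusterv1.PausedAnnotation] = "true"

    // Patch using the untouched copy as the base of the merge patch.
    return c.Patch(ctx, updated, client.MergeFrom(cached))
}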
etcdadm-controller
aws
Go
package controllers import ( "context" "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "sigs.k8s.io/cluster-api/util/conditions" "path/filepath" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" certutil "k8s.io/client-go/util/cert" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/etcdadm/certs/pkiutil" "sigs.k8s.io/etcdadm/constants" ) // etcdadm provisioning works as follows: // machine one runs etcdadm init, generates CA and client certs // CA certs are copied over to remaining nodes to run etcdadm join // This provider is going to generate CA cert-key for etcd, and create two Secrets to store CA cert + client cert-key to be used by kube-apiserver func (r *EtcdadmClusterReconciler) generateCAandClientCertSecrets(ctx context.Context, cluster *clusterv1.Cluster, etcdCluster *etcdv1.EtcdadmCluster) error { log := r.Log // Generate external etcd CA cert + key pair CACertKeyPair := etcdCACertKeyPair() err := CACertKeyPair.LookupOrGenerate( ctx, r.Client, util.ObjectKey(cluster), *metav1.NewControllerRef(etcdCluster, etcdv1.GroupVersion.WithKind("EtcdadmCluster")), ) if err != nil { log.Error(err, "Failed to look up or generate CA cert key pair") return err } caCertKey := CACertKeyPair.GetByPurpose(secret.ManagedExternalEtcdCA) if caCertKey == nil { return fmt.Errorf("nil returned from getting etcd CA certificate by purpose %s", secret.ManagedExternalEtcdCA) } // Use the generated CA cert+key pair to generate and sign etcd client cert+key pair caCertDecoded, _ := pem.Decode(caCertKey.KeyPair.Cert) caCert, err := x509.ParseCertificate(caCertDecoded.Bytes) if err != nil { log.Error(err, "Failed to parse etcd CA cert") return err } caKeyDecoded, _ := pem.Decode(caCertKey.KeyPair.Key) caKey, err := x509.ParsePKCS1PrivateKey(caKeyDecoded.Bytes) if err != nil { log.Error(err, "Failed to parse etcd CA key") return err } commonName := fmt.Sprintf("%s-kube-apiserver-etcd-client", cluster.Name) // This certConfig is what etcdadm uses to generate client certs https://github.com/kubernetes-sigs/etcdadm/blob/master/certs/certs.go#L233 certConfig := certutil.Config{ CommonName: commonName, Organization: []string{constants.MastersGroup}, Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, } apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, certConfig) if err != nil { return fmt.Errorf("failure while creating %q etcd client key and certificate: %v", commonName, err) } // Now generate two Secrets, one containing the client cert+key pair and other containing the etcd CA cert. Ech control plane provider should // use these two Secrets for communicating with etcd. 
apiServerClientCertKeyPair := secret.Certificate{ Purpose: secret.APIServerEtcdClient, KeyPair: &certs.KeyPair{ Cert: certs.EncodeCertPEM(apiClientCert), Key: certs.EncodePrivateKeyPEM(apiClientKey), }, Generated: true, } s := apiServerClientCertKeyPair.AsSecret(client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}, *metav1.NewControllerRef(etcdCluster, etcdv1.GroupVersion.WithKind("EtcdadmCluster"))) if err := r.Client.Create(ctx, s); err != nil && !apierrors.IsAlreadyExists(err) { return fmt.Errorf("failure while saving etcd client key and certificate: %v", err) } log.Info("Saved apiserver client cert key as secret") s = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: secret.Name(cluster.Name, secret.EtcdCA), Labels: map[string]string{ clusterv1.ClusterNameLabel: cluster.Name, }, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(etcdCluster, etcdv1.GroupVersion.WithKind("EtcdadmCluster"))}, }, Data: map[string][]byte{ secret.TLSCrtDataName: caCertKey.KeyPair.Cert, }, Type: clusterv1.ClusterSecretType, } if err := r.Client.Create(ctx, s); err != nil && !apierrors.IsAlreadyExists(err) { return fmt.Errorf("failure while saving etcd CA certificate: %v", err) } log.Info("Saved etcd ca cert as secret") conditions.MarkTrue(etcdCluster, etcdv1.EtcdCertificatesAvailableCondition) return nil } func etcdCACertKeyPair() secret.Certificates { certificatesDir := "/etc/etcd/pki" certificates := secret.Certificates{ &secret.Certificate{ Purpose: secret.ManagedExternalEtcdCA, CertFile: filepath.Join(certificatesDir, "ca.crt"), KeyFile: filepath.Join(certificatesDir, "ca.key"), }, } return certificates } // TODO: save CA and client cert on the reconciler object func (r *EtcdadmClusterReconciler) getCACert(ctx context.Context, cluster *clusterv1.Cluster) ([]byte, error) { caCert := &secret.Certificates{ &secret.Certificate{ Purpose: secret.ManagedExternalEtcdCA, }, } if err := caCert.Lookup(ctx, r.Client, util.ObjectKey(cluster)); err != nil { return []byte{}, errors.Wrap(err, "error looking up external etcd CA certs") } if caCertKey := caCert.GetByPurpose(secret.ManagedExternalEtcdCA); caCertKey != nil { if caCertKey.KeyPair == nil { return []byte{}, errors.New("ca cert key pair not found for cluster") } return caCertKey.KeyPair.Cert, nil } return []byte{}, fmt.Errorf("nil returned from getting etcd CA certificate by purpose %s", secret.ManagedExternalEtcdCA) } func (r *EtcdadmClusterReconciler) getClientCerts(ctx context.Context, cluster *clusterv1.Cluster) (tls.Certificate, error) { clientCert := &secret.Certificates{ &secret.Certificate{ Purpose: secret.APIServerEtcdClient, }, } if err := clientCert.Lookup(ctx, r.Client, util.ObjectKey(cluster)); err != nil { return tls.Certificate{}, err } if clientCertKey := clientCert.GetByPurpose(secret.APIServerEtcdClient); clientCertKey != nil { return tls.X509KeyPair(clientCertKey.KeyPair.Cert, clientCertKey.KeyPair.Key) } return tls.Certificate{}, fmt.Errorf("nil returned from getting etcd CA certificate by purpose %s", secret.APIServerEtcdClient) }
165
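The reconciler above publishes two Secrets per cluster: one holding the kube-apiserver etcd client certificate and key, and one holding the etcd CA certificate. As a hedged sketch of how another component could consume them, the function below rebuilds a tls.Config from those Secrets using the same sigs.k8s.io/cluster-api/util/secret naming helpers; the function name and error messages are illustrative, and the imports (context, crypto/tls, crypto/x509, fmt, corev1, client, secret) are assumed to match those already used in this file.

func etcdTLSConfigFromSecrets(ctx context.Context, c client.Client, clusterName, namespace string) (*tls.Config, error) {
    // CA certificate published by the etcdadm cluster controller.
    caSecret := &corev1.Secret{}
    if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: secret.Name(clusterName, secret.EtcdCA)}, caSecret); err != nil {
        return nil, err
    }

    // Client certificate and key intended for the kube-apiserver (or any etcd client).
    clientSecret := &corev1.Secret{}
    if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: secret.Name(clusterName, secret.APIServerEtcdClient)}, clientSecret); err != nil {
        return nil, err
    }

    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(caSecret.Data[secret.TLSCrtDataName]) {
        return nil, fmt.Errorf("no CA certificates found in secret %s/%s", namespace, caSecret.Name)
    }

    cert, err := tls.X509KeyPair(clientSecret.Data[secret.TLSCrtDataName], clientSecret.Data[secret.TLSKeyDataName])
    if err != nil {
        return nil, err
    }

    return &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}}, nil
}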
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "fmt" "time" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" ) // EtcdadmClusterReconciler reconciles a EtcdadmCluster object type EtcdadmClusterReconciler struct { controller controller.Controller client.Client recorder record.EventRecorder uncachedClient client.Reader Log logr.Logger Scheme *runtime.Scheme etcdHealthCheckConfig etcdHealthCheckConfig MaxConcurrentReconciles int } func (r *EtcdadmClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, done <-chan struct{}) error { c, err := ctrl.NewControllerManagedBy(mgr). For(&etcdv1.EtcdadmCluster{}). Owns(&clusterv1.Machine{}). WithEventFilter(predicates.ResourceNotPaused(r.Log)). WithOptions(controller.Options{MaxConcurrentReconciles: r.MaxConcurrentReconciles}). 
Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } err = c.Watch( &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(r.ClusterToEtcdadmCluster), predicates.ClusterUnpausedAndInfrastructureReady(r.Log), ) if err != nil { return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") } r.controller = c r.recorder = mgr.GetEventRecorderFor("etcdadm-cluster-controller") r.uncachedClient = mgr.GetAPIReader() go r.startHealthCheckLoop(ctx, done) return nil } // +kubebuilder:rbac:groups=etcdcluster.cluster.x-k8s.io,resources=etcdadmclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=etcdcluster.cluster.x-k8s.io,resources=etcdadmclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=etcdadmconfigs;etcdadmconfigs/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=configmaps;events;secrets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete func (r *EtcdadmClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) { log := r.Log.WithValues("etcdadmcluster", req.NamespacedName) // Lookup the etcdadm cluster object etcdCluster := &etcdv1.EtcdadmCluster{} if err := r.Client.Get(ctx, req.NamespacedName, etcdCluster); err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil } log.Error(err, "Failed to get etcdadm cluster") return ctrl.Result{}, err } // Fetch the CAPI Cluster. cluster, err := util.GetOwnerCluster(ctx, r.Client, etcdCluster.ObjectMeta) if err != nil { log.Error(err, "Failed to retrieve owner Cluster from the API Server") return ctrl.Result{}, err } if cluster == nil { log.Info("Cluster Controller has not yet set OwnerRef on etcd") return ctrl.Result{}, nil } if !cluster.Status.InfrastructureReady { log.Info("Infrastructure cluster is not yet ready") return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } if annotations.IsPaused(cluster, etcdCluster) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } // Initialize the patch helper. 
patchHelper, err := patch.NewHelper(etcdCluster, r.Client) if err != nil { log.Error(err, "Failed to configure the patch helper") return ctrl.Result{Requeue: true}, nil } // Add finalizer first if it does not exist to avoid the race condition between init and delete if !controllerutil.ContainsFinalizer(etcdCluster, etcdv1.EtcdadmClusterFinalizer) { controllerutil.AddFinalizer(etcdCluster, etcdv1.EtcdadmClusterFinalizer) // patch and return right away instead of reusing the main defer, // because the main defer may take too much time to get cluster status patchOpts := []patch.Option{patch.WithStatusObservedGeneration{}} if err := patchHelper.Patch(ctx, etcdCluster, patchOpts...); err != nil { log.Error(err, "Failed to patch EtcdadmCluster to add finalizer") return ctrl.Result{}, err } return ctrl.Result{}, nil } defer func() { etcdMachines, err := r.checkOwnedMachines(ctx, log, etcdCluster, cluster) if err != nil { reterr = kerrors.NewAggregate([]error{reterr, err}) return } else { if err := r.updateMachinesEtcdReadyLabel(ctx, log, etcdMachines); err != nil { log.Error(err, "Failed to update etcd ready labels in machines") reterr = kerrors.NewAggregate([]error{reterr, err}) } // Always attempt to update status. if err := r.updateStatus(ctx, etcdCluster, cluster, etcdMachines); err != nil { log.Error(err, "Failed to update EtcdadmCluster Status") reterr = kerrors.NewAggregate([]error{reterr, err}) } } if conditions.IsFalse(etcdCluster, etcdv1.EtcdMachinesSpecUpToDateCondition) && conditions.GetReason(etcdCluster, etcdv1.EtcdMachinesSpecUpToDateCondition) == etcdv1.EtcdRollingUpdateInProgressReason { // set ready to false, so that CAPI cluster controller will pause KCP so it doesn't keep checking if endpoints are updated etcdCluster.Status.Ready = false } // Always attempt to Patch the EtcdadmCluster object and status after each reconciliation. if err := patchEtcdCluster(ctx, patchHelper, etcdCluster); err != nil { log.Error(err, "Failed to patch EtcdadmCluster") reterr = kerrors.NewAggregate([]error{reterr, err}) } if reterr == nil && !res.Requeue && !(res.RequeueAfter > 0) && etcdCluster.ObjectMeta.DeletionTimestamp.IsZero() { if !etcdCluster.Status.Ready { res = ctrl.Result{RequeueAfter: 20 * time.Second} } } }() if !etcdCluster.ObjectMeta.DeletionTimestamp.IsZero() { // Handle deletion reconciliation loop. return r.reconcileDelete(ctx, etcdCluster, cluster) } return r.reconcile(ctx, etcdCluster, cluster) } func (r *EtcdadmClusterReconciler) reconcile(ctx context.Context, etcdCluster *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster) (ctrl.Result, error) { log := r.Log.WithName(etcdCluster.Name) var desiredReplicas int // Reconcile the external infrastructure reference. 
if err := r.reconcileExternalReference(ctx, cluster, etcdCluster.Spec.InfrastructureTemplate); err != nil { return ctrl.Result{}, err } etcdMachines, err := collections.GetFilteredMachinesForCluster(ctx, r.uncachedClient, cluster, EtcdClusterMachines(cluster.Name, etcdCluster.Name)) if err != nil { return ctrl.Result{}, errors.Wrap(err, "Error filtering machines for etcd cluster") } ownedMachines := etcdMachines.Filter(collections.OwnedMachines(etcdCluster)) ep, err := NewEtcdPlane(ctx, r.Client, cluster, etcdCluster, ownedMachines) if err != nil { return ctrl.Result{}, errors.Wrap(err, "Error initializing internal object EtcdPlane") } if len(ownedMachines) != len(etcdMachines) { if conditions.IsUnknown(etcdCluster, etcdv1.EtcdClusterHasNoOutdatedMembersCondition) || conditions.IsTrue(etcdCluster, etcdv1.EtcdClusterHasNoOutdatedMembersCondition) { conditions.MarkFalse(etcdCluster, etcdv1.EtcdClusterHasNoOutdatedMembersCondition, etcdv1.EtcdClusterHasOutdatedMembersReason, clusterv1.ConditionSeverityInfo, "%d etcd members have outdated spec", len(etcdMachines.Difference(ownedMachines))) } /* These would be the out-of-date etcd machines still belonging to the current etcd cluster as etcd members, but not owned by the EtcdadmCluster object When upgrading a cluster, etcd machines need to be upgraded first so that the new etcd endpoints become available. But the outdated controlplane machines will keep trying to connect to the etcd members they were configured with. So we cannot delete these older etcd members till controlplane rollout has finished. So this is only possible after an upgrade, and these machines can be deleted only after controlplane upgrade has finished. */ if _, ok := etcdCluster.Annotations[clusterv1.ControlPlaneUpgradeCompletedAnnotation]; ok { outdatedMachines := etcdMachines.Difference(ownedMachines) log.Info(fmt.Sprintf("Controlplane upgrade has completed, deleting older outdated etcd members: %v", outdatedMachines.Names())) for _, outdatedMachine := range outdatedMachines { outdatedMachineAddress := getEtcdMachineAddress(outdatedMachine) if err := r.removeEtcdMachine(ctx, etcdCluster, cluster, outdatedMachine, outdatedMachineAddress); err != nil { return ctrl.Result{}, err } } // requeue so controller reconciles after last machine is deleted and the "EtcdClusterHasNoOutdatedMembersCondition" is marked true return ctrl.Result{Requeue: true}, nil } } else { if _, ok := etcdCluster.Annotations[clusterv1.ControlPlaneUpgradeCompletedAnnotation]; ok { log.Info("Outdated etcd members deleted, removing controlplane-upgrade complete annotation") delete(etcdCluster.Annotations, clusterv1.ControlPlaneUpgradeCompletedAnnotation) } if conditions.IsFalse(etcdCluster, etcdv1.EtcdClusterHasNoOutdatedMembersCondition) { log.Info(fmt.Sprintf("Outdated etcd members deleted, setting %s to true", etcdv1.EtcdClusterHasNoOutdatedMembersCondition)) conditions.MarkTrue(etcdCluster, etcdv1.EtcdClusterHasNoOutdatedMembersCondition) } } // This aggregates the state of all machines conditions.SetAggregate(etcdCluster, etcdv1.EtcdMachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) numCurrentMachines := len(ownedMachines) desiredReplicas = int(*etcdCluster.Spec.Replicas) // Etcd machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. 
needRollout := ep.MachinesNeedingRollout() switch { case len(needRollout) > 0: log.Info("Rolling out Etcd machines", "needRollout", needRollout.Names()) if conditions.IsFalse(ep.EC, etcdv1.EtcdMachinesSpecUpToDateCondition) && len(ep.UpToDateMachines()) > 0 { // update is already in progress, some machines have been rolled out with the new spec newestUpToDateMachine := ep.NewestUpToDateMachine() newestUpToDateMachineCreationTime := newestUpToDateMachine.CreationTimestamp.Time nextMachineUpdateTime := newestUpToDateMachineCreationTime.Add(time.Duration(minEtcdMemberReadySeconds) * time.Second) if nextMachineUpdateTime.After(time.Now()) { // the latest machine with updated spec should get more time for etcd data sync // requeue this after after := time.Until(nextMachineUpdateTime) log.Info(fmt.Sprintf("Requeueing etcdadm cluster for updating next machine after %s", after.String())) return ctrl.Result{RequeueAfter: after}, nil } // otherwise, if the minimum time to wait between successive machine updates has passed, // check that the latest etcd member is ready address := getEtcdMachineAddress(newestUpToDateMachine) if address == "" { return ctrl.Result{}, nil } // if member passes healthcheck, that is proof that data sync happened and we can proceed further with upgrade if err := r.performEndpointHealthCheck(ctx, cluster, getMemberClientURL(address), true); err != nil { return ctrl.Result{}, err } } conditions.MarkFalse(ep.EC, etcdv1.EtcdMachinesSpecUpToDateCondition, etcdv1.EtcdRollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), len(ep.Machines)-len(needRollout)) return r.upgradeEtcdCluster(ctx, cluster, etcdCluster, ep, needRollout) default: // make sure last upgrade operation is marked as completed. // NOTE: we are checking the condition already exists in order to avoid to set this condition at the first // reconciliation/before a rolling upgrade actually starts. 
if conditions.Has(ep.EC, etcdv1.EtcdMachinesSpecUpToDateCondition) { conditions.MarkTrue(ep.EC, etcdv1.EtcdMachinesSpecUpToDateCondition) _, hasUpgradeAnnotation := etcdCluster.Annotations[etcdv1.UpgradeInProgressAnnotation] if hasUpgradeAnnotation { delete(etcdCluster.Annotations, etcdv1.UpgradeInProgressAnnotation) } } } switch { case numCurrentMachines < desiredReplicas && numCurrentMachines == 0: // Create first etcd machine to run etcdadm init log.Info("Initializing etcd cluster", "Desired", desiredReplicas, "Existing", numCurrentMachines) conditions.MarkFalse(etcdCluster, etcdv1.InitializedCondition, etcdv1.WaitingForEtcdadmInitReason, clusterv1.ConditionSeverityInfo, "") conditions.MarkFalse(etcdCluster, etcdv1.EtcdEndpointsAvailable, etcdv1.WaitingForEtcdadmEndpointsToPassHealthcheckReason, clusterv1.ConditionSeverityInfo, "") return r.intializeEtcdCluster(ctx, etcdCluster, cluster, ep) case numCurrentMachines > 0 && conditions.IsFalse(etcdCluster, etcdv1.InitializedCondition): // as soon as first etcd machine is up, etcdadm init would be run on it to initialize the etcd cluster, update the condition if !etcdCluster.Status.Initialized { // defer func in Reconcile will requeue it after 20 sec return ctrl.Result{}, nil } // since etcd cluster has been initialized conditions.MarkTrue(etcdCluster, etcdv1.InitializedCondition) case numCurrentMachines < desiredReplicas && numCurrentMachines > 0: log.Info("Scaling up etcd cluster", "Desired", desiredReplicas, "Existing", numCurrentMachines) return r.scaleUpEtcdCluster(ctx, etcdCluster, cluster, ep) case numCurrentMachines > desiredReplicas: log.Info("Scaling down etcd cluster", "Desired", desiredReplicas, "Existing", numCurrentMachines) // The last parameter corresponds to Machines that need to be rolled out, eg during upgrade, should always be empty here. 
return r.scaleDownEtcdCluster(ctx, etcdCluster, cluster, ep, collections.Machines{}) } return ctrl.Result{}, nil } func (r *EtcdadmClusterReconciler) reconcileDelete(ctx context.Context, etcdCluster *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) log.Info("Reconcile EtcdadmCluster deletion") etcdMachines, err := collections.GetFilteredMachinesForCluster(ctx, r.uncachedClient, cluster, EtcdClusterMachines(cluster.Name, etcdCluster.Name)) if err != nil { return ctrl.Result{}, errors.Wrap(err, "Error filtering machines for etcd cluster") } ownedMachines := etcdMachines.Filter(collections.OwnedMachines(etcdCluster)) if len(ownedMachines) == 0 { // If no etcd machines are left, remove the finalizer controllerutil.RemoveFinalizer(etcdCluster, etcdv1.EtcdadmClusterFinalizer) return ctrl.Result{}, nil } // This aggregates the state of all machines conditions.SetAggregate(etcdCluster, etcdv1.EtcdMachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) // Delete etcd machines machinesToDelete := ownedMachines.Filter(collections.Not(collections.HasDeletionTimestamp)) var errs []error for _, m := range machinesToDelete { logger := log.WithValues("machine", m) if err := r.Client.Delete(ctx, m); err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to cleanup owned machine") errs = append(errs, err) } } if len(errs) > 0 { err := kerrors.NewAggregate(errs) r.recorder.Eventf(etcdCluster, corev1.EventTypeWarning, "FailedDelete", "Failed to delete etcd Machines for cluster %s/%s: %v", cluster.Namespace, cluster.Name, err) return ctrl.Result{}, err } conditions.MarkFalse(etcdCluster, etcdv1.EtcdClusterResizeCompleted, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") // requeue to check if machines are deleted and remove the finalizer return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } // ClusterToEtcdadmCluster is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for EtcdadmCluster based on updates to a Cluster. 
func (r *EtcdadmClusterReconciler) ClusterToEtcdadmCluster(o client.Object) []ctrl.Request { c, ok := o.(*clusterv1.Cluster) if !ok { panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) } etcdRef := c.Spec.ManagedExternalEtcdRef if etcdRef != nil && etcdRef.Kind == "EtcdadmCluster" { return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: etcdRef.Namespace, Name: etcdRef.Name}}} } return nil } func patchEtcdCluster(ctx context.Context, patchHelper *patch.Helper, ec *etcdv1.EtcdadmCluster) error { // SetSummary sets the Ready condition on an object, in this case the EtcdadmCluster as an aggregate of all conditions defined on EtcdadmCluster conditions.SetSummary(ec, conditions.WithConditions( etcdv1.EtcdMachinesSpecUpToDateCondition, etcdv1.EtcdCertificatesAvailableCondition, etcdv1.EtcdMachinesReadyCondition, etcdv1.EtcdClusterResizeCompleted, etcdv1.InitializedCondition, etcdv1.EtcdClusterHasNoOutdatedMembersCondition, etcdv1.EtcdEndpointsAvailable, ), ) // patch the EtcdadmCluster conditions based on current values at the end of every reconcile return patchHelper.Patch( ctx, ec, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, etcdv1.EtcdMachinesSpecUpToDateCondition, etcdv1.EtcdCertificatesAvailableCondition, etcdv1.EtcdMachinesReadyCondition, etcdv1.EtcdClusterResizeCompleted, etcdv1.InitializedCondition, etcdv1.EtcdClusterHasNoOutdatedMembersCondition, etcdv1.EtcdEndpointsAvailable, }}, patch.WithStatusObservedGeneration{}, ) }
413
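SetupWithManager above expects a context, a manager, and a done channel that stops the health-check loop. The following is a minimal, hypothetical main-function sketch showing one way the reconciler could be wired into a controller-runtime manager; the controllers import path, scheme setup, logger names, and done-channel handling are assumptions, not the repository's actual main.go.

package main

import (
    "os"

    etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
    "github.com/aws/etcdadm-controller/controllers"
    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
    scheme := runtime.NewScheme()
    _ = clientgoscheme.AddToScheme(scheme)
    _ = clusterv1.AddToScheme(scheme)
    _ = etcdv1.AddToScheme(scheme)

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
    if err != nil {
        ctrl.Log.Error(err, "unable to start manager")
        os.Exit(1)
    }

    ctx := ctrl.SetupSignalHandler()

    // Closing done signals the periodic etcd health-check loop started in SetupWithManager to stop.
    done := make(chan struct{})
    defer close(done)

    r := &controllers.EtcdadmClusterReconciler{
        Client:                  mgr.GetClient(),
        Log:                     ctrl.Log.WithName("controllers").WithName("EtcdadmCluster"),
        Scheme:                  mgr.GetScheme(),
        MaxConcurrentReconciles: 1,
    }
    if err := r.SetupWithManager(ctx, mgr, done); err != nil {
        ctrl.Log.Error(err, "unable to create controller", "controller", "EtcdadmCluster")
        os.Exit(1)
    }

    if err := mgr.Start(ctx); err != nil {
        ctrl.Log.Error(err, "problem running manager")
        os.Exit(1)
    }
}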
etcdadm-controller
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "testing" "time" "k8s.io/apiserver/pkg/storage/names" "k8s.io/utils/pointer" "sigs.k8s.io/cluster-api/util/conditions" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" // +kubebuilder:scaffold:imports ) var ctx = ctrl.SetupSignalHandler() // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") } func setupScheme() *runtime.Scheme { scheme := runtime.NewScheme() if err := clusterv1.AddToScheme(scheme); err != nil { panic(err) } if err := etcdv1.AddToScheme(scheme); err != nil { panic(err) } if err := corev1.AddToScheme(scheme); err != nil { panic(err) } if err := etcdbootstrapv1.AddToScheme(scheme); err != nil { panic(err) } return scheme } const ( testClusterName = "testCluster" testNamespace = "test" testEtcdadmClusterName = "testEtcdadmCluster" testInfrastructureTemplateName = "testInfraTemplate" ) var ( infraTemplate = &unstructured.Unstructured{ Object: map[string]interface{}{ "kind": "InfrastructureTemplate", "apiVersion": "infra.io/v1", "metadata": map[string]interface{}{ "name": testInfrastructureTemplateName, "namespace": testNamespace, }, "spec": map[string]interface{}{ "template": map[string]interface{}{ "spec": map[string]interface{}{ "hello": "world", }, }, }, }, } ) func TestClusterToEtcdadmCluster(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() objects := []client.Object{ cluster, } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() expectedResult := []ctrl.Request{ { NamespacedName: client.ObjectKey{ Namespace: cluster.Spec.ManagedExternalEtcdRef.Namespace, Name: cluster.Spec.ManagedExternalEtcdRef.Name}, }, } r := &EtcdadmClusterReconciler{ Client: fakeClient, Log: log.Log, } got := r.ClusterToEtcdadmCluster(cluster) g.Expect(got).To(Equal(expectedResult)) } func TestReconcileNoClusterOwnerRef(t *testing.T) { g := NewWithT(t) etcdadmCluster := &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: testNamespace, Name: testEtcdadmClusterName, }, Spec: etcdv1.EtcdadmClusterSpec{ EtcdadmConfigSpec: etcdbootstrapv1.EtcdadmConfigSpec{ CloudInitConfig: &etcdbootstrapv1.CloudInitConfig{ Version: "v3.4.9", }, }, }, } objects := []client.Object{ etcdadmCluster, } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() 
r := &EtcdadmClusterReconciler{ Client: fakeClient, Log: log.Log, } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(ctrl.Result{})) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), machineList, client.InNamespace("test"))).To(Succeed()) g.Expect(machineList.Items).To(BeEmpty()) } func TestReconcilePaused(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() cluster.Spec.Paused = true etcdadmCluster := newEtcdadmCluster(cluster) objects := []client.Object{ cluster, etcdadmCluster, } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ Client: fakeClient, Log: log.Log, } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), machineList, client.InNamespace("test"))).To(Succeed()) g.Expect(machineList.Items).To(BeEmpty()) // Test: etcdcluster is paused and cluster is not cluster.Spec.Paused = false etcdadmCluster.ObjectMeta.Annotations = map[string]string{} etcdadmCluster.ObjectMeta.Annotations[clusterv1.PausedAnnotation] = "paused" _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) } // If cluster infrastructure is not ready, reconcile won't proceed and will requeue etcdadmCluster to be processed after 5 sec func TestReconcileClusterInfrastructureNotReady(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() cluster.Status.InfrastructureReady = false etcdadmCluster := newEtcdadmCluster(cluster) etcdadmCluster.ObjectMeta.Finalizers = []string{} // no machines or etcdadmConfig objects exist for the etcdadm cluster yet, so it should make a call to initialize the cluster // which will create one machine and one etcdadmConfig object objects := []client.Object{ cluster, etcdadmCluster, infraTemplate.DeepCopy(), } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ Client: fakeClient, uncachedClient: fakeClient, Log: log.Log, } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(ctrl.Result{Requeue: false, RequeueAfter: 5 * time.Second})) } func TestReconcileNoFinalizer(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() etcdadmCluster := newEtcdadmCluster(cluster) etcdadmCluster.ObjectMeta.Finalizers = []string{} // no machines or etcdadmConfig objects exist for the etcdadm cluster yet, so it should make a call to initialize the cluster // which will create one machine and one etcdadmConfig object objects := []client.Object{ cluster, etcdadmCluster, infraTemplate.DeepCopy(), } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ Client: fakeClient, uncachedClient: fakeClient, Log: log.Log, } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) updatedEtcdadmCluster := etcdv1.EtcdadmCluster{} g.Expect(fakeClient.Get(ctx, util.ObjectKey(etcdadmCluster), &updatedEtcdadmCluster)).To(Succeed()) g.Expect(len(updatedEtcdadmCluster.Finalizers)).ToNot(BeZero()) } func 
TestReconcileInitializeEtcdCluster(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() etcdadmCluster := newEtcdadmCluster(cluster) // no machines or etcdadmConfig objects exist for the etcdadm cluster yet, so it should make a call to initialize the cluster // which will create one machine and one etcdadmConfig object objects := []client.Object{ cluster, etcdadmCluster, infraTemplate.DeepCopy(), } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ Client: fakeClient, uncachedClient: fakeClient, Log: log.Log, } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), machineList, client.InNamespace("test"))).To(Succeed()) g.Expect(len(machineList.Items)).To(Equal(1)) etcdadmConfig := &etcdbootstrapv1.EtcdadmConfigList{} g.Expect(fakeClient.List(context.Background(), etcdadmConfig, client.InNamespace("test"))).To(Succeed()) g.Expect(len(etcdadmConfig.Items)).To(Equal(1)) updatedEtcdadmCluster := &etcdv1.EtcdadmCluster{} g.Expect(fakeClient.Get(ctx, util.ObjectKey(etcdadmCluster), updatedEtcdadmCluster)).To(Succeed()) g.Expect(conditions.IsFalse(updatedEtcdadmCluster, etcdv1.InitializedCondition)).To(BeTrue()) g.Expect(conditions.IsFalse(updatedEtcdadmCluster, etcdv1.EtcdEndpointsAvailable)).To(BeTrue()) } func TestReconcile_EtcdClusterNotInitialized(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() etcdadmCluster := newEtcdadmCluster(cluster) // CAPI machine controller has not yet created the first etcd Machine, so it has not yet set Initialized to true etcdadmCluster.Status.Initialized = false conditions.MarkFalse(etcdadmCluster, etcdv1.InitializedCondition, etcdv1.WaitingForEtcdadmInitReason, clusterv1.ConditionSeverityInfo, "") machine := newEtcdMachine(etcdadmCluster, cluster) objects := []client.Object{ cluster, etcdadmCluster, infraTemplate.DeepCopy(), machine, } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ Client: fakeClient, uncachedClient: fakeClient, Log: log.Log, } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) updatedEtcdadmCluster := &etcdv1.EtcdadmCluster{} g.Expect(fakeClient.Get(ctx, util.ObjectKey(etcdadmCluster), updatedEtcdadmCluster)).To(Succeed()) g.Expect(conditions.IsTrue(updatedEtcdadmCluster, etcdv1.InitializedCondition)).To(BeFalse()) } func TestReconcile_EtcdClusterIsInitialized(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() etcdadmCluster := newEtcdadmCluster(cluster) // CAPI machine controller has set status.Initialized to true, after the first etcd Machine is created, and after creating the Secret containing etcd init address etcdadmCluster.Status.Initialized = true // the etcdadm controller does not know yet that CAPI machine controller has set status.Initialized to true; InitializedCondition is still false conditions.MarkFalse(etcdadmCluster, etcdv1.InitializedCondition, etcdv1.WaitingForEtcdadmInitReason, clusterv1.ConditionSeverityInfo, "") machine := newEtcdMachine(etcdadmCluster, cluster) objects := []client.Object{ cluster, etcdadmCluster, infraTemplate.DeepCopy(), machine, } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ 
Client: fakeClient, uncachedClient: fakeClient, Log: log.Log, } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) updatedEtcdadmCluster := &etcdv1.EtcdadmCluster{} g.Expect(fakeClient.Get(ctx, util.ObjectKey(etcdadmCluster), updatedEtcdadmCluster)).To(Succeed()) g.Expect(conditions.IsTrue(updatedEtcdadmCluster, etcdv1.InitializedCondition)).To(BeTrue()) } func TestReconcileScaleUpEtcdCluster(t *testing.T) { g := NewWithT(t) cluster := newClusterWithExternalEtcd() etcdadmCluster := newEtcdadmCluster(cluster) // CAPI machine controller has set status.Initialized to true, after the first etcd Machine is created, and after creating the Secret containing etcd init address etcdadmCluster.Status.Initialized = true // etcdadm controller has also registered that the status.Initialized field is true, so it has set InitializedCondition to true conditions.MarkTrue(etcdadmCluster, etcdv1.InitializedCondition) machine := newEtcdMachine(etcdadmCluster, cluster) objects := []client.Object{ cluster, etcdadmCluster, infraTemplate.DeepCopy(), machine, } fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() r := &EtcdadmClusterReconciler{ Client: fakeClient, uncachedClient: fakeClient, Log: log.Log, } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(etcdadmCluster)}) g.Expect(err).NotTo(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), machineList, client.InNamespace("test"))).To(Succeed()) g.Expect(len(machineList.Items)).To(Equal(2)) } // newClusterWithExternalEtcd return a CAPI cluster object with managed external etcd ref func newClusterWithExternalEtcd() *clusterv1.Cluster { return &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: testNamespace, Name: testClusterName, }, Spec: clusterv1.ClusterSpec{ ManagedExternalEtcdRef: &corev1.ObjectReference{ Kind: "EtcdadmCluster", Namespace: testNamespace, Name: testEtcdadmClusterName, APIVersion: etcdv1.GroupVersion.String(), }, InfrastructureRef: &corev1.ObjectReference{ Kind: "InfrastructureTemplate", Namespace: testNamespace, Name: testInfrastructureTemplateName, APIVersion: "infra.io/v1", }, }, Status: clusterv1.ClusterStatus{ InfrastructureReady: true, }, } } func newEtcdadmCluster(cluster *clusterv1.Cluster) *etcdv1.EtcdadmCluster { return &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: testNamespace, Name: testEtcdadmClusterName, OwnerReferences: []metav1.OwnerReference{ { Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), Name: cluster.Name, UID: cluster.GetUID(), }, }, Finalizers: []string{etcdv1.EtcdadmClusterFinalizer}, }, Spec: etcdv1.EtcdadmClusterSpec{ EtcdadmConfigSpec: etcdbootstrapv1.EtcdadmConfigSpec{ CloudInitConfig: &etcdbootstrapv1.CloudInitConfig{ Version: "v3.4.9", }, }, Replicas: pointer.Int32(int32(3)), InfrastructureTemplate: corev1.ObjectReference{ Kind: infraTemplate.GetKind(), APIVersion: infraTemplate.GetAPIVersion(), Name: infraTemplate.GetName(), Namespace: testNamespace, }, }, } } func newEtcdMachine(etcdadmCluster *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster) *clusterv1.Machine { return &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.GenerateName(etcdadmCluster.Name + "-"), Namespace: etcdadmCluster.Namespace, Labels: EtcdLabelsForCluster(cluster.Name, 
etcdadmCluster.Name), OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(etcdadmCluster, etcdv1.GroupVersion.WithKind("EtcdadmCluster")), }, }, Spec: clusterv1.MachineSpec{ ClusterName: cluster.Name, InfrastructureRef: corev1.ObjectReference{ Kind: infraTemplate.GetKind(), APIVersion: infraTemplate.GetAPIVersion(), Name: infraTemplate.GetName(), Namespace: infraTemplate.GetNamespace(), }, }, } }
477
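A hypothetical addition to this suite, sketched under the assumption that it compiles in the same controllers package: it reuses the helpers above (newClusterWithExternalEtcd, newEtcdadmCluster, newEtcdMachine) together with the etcdMachines and updateMachinesEtcdReadyLabel helpers defined elsewhere in the package to check that a healthy member gets the etcd-ready label. The endpoint value is made up.

func TestUpdateMachinesEtcdReadyLabelSketch(t *testing.T) {
    g := NewWithT(t)
    cluster := newClusterWithExternalEtcd()
    etcdadmCluster := newEtcdadmCluster(cluster)
    machine := newEtcdMachine(etcdadmCluster, cluster)

    fakeClient := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(machine).Build()
    r := &EtcdadmClusterReconciler{
        Client: fakeClient,
        Log:    log.Log,
    }

    // Fetch the machine through the fake client so it carries a resourceVersion the
    // fake client will accept on update.
    stored := &clusterv1.Machine{}
    g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(machine), stored)).To(Succeed())

    // One owned machine that is listening and has no health error, i.e. healthy.
    machines := etcdMachines{
        stored.Name: {Machine: stored, endpoint: "https://10.0.0.1:2379", listening: true},
    }
    g.Expect(r.updateMachinesEtcdReadyLabel(ctx, r.Log, machines)).To(Succeed())

    updated := &clusterv1.Machine{}
    g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(machine), updated)).To(Succeed())
    g.Expect(updated.Labels).To(HaveKeyWithValue(MachineEtcdReadyLabelName, "true"))
}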
etcdadm-controller
aws
Go
package controllers import ( "context" "reflect" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/failuredomains" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) type EtcdPlane struct { EC *etcdv1.EtcdadmCluster Cluster *clusterv1.Cluster Machines collections.Machines machinesPatchHelpers map[string]*patch.Helper etcdadmConfigs map[string]*etcdbootstrapv1.EtcdadmConfig infraResources map[string]*unstructured.Unstructured } func NewEtcdPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster, ec *etcdv1.EtcdadmCluster, ownedMachines collections.Machines) (*EtcdPlane, error) { infraObjects, err := getInfraResources(ctx, client, ownedMachines) if err != nil { return nil, err } etcdadmConfigs, err := getEtcdadmConfigs(ctx, client, ownedMachines) if err != nil { return nil, err } patchHelpers := map[string]*patch.Helper{} for _, machine := range ownedMachines { patchHelper, err := patch.NewHelper(machine, client) if err != nil { return nil, errors.Wrapf(err, "failed to create patch helper for machine %s", machine.Name) } patchHelpers[machine.Name] = patchHelper } return &EtcdPlane{ EC: ec, Cluster: cluster, Machines: ownedMachines, machinesPatchHelpers: patchHelpers, infraResources: infraObjects, etcdadmConfigs: etcdadmConfigs, }, nil } // Etcdadm controller follows the same logic for selecting a machine to scale down as the KCP controller. Source: https://github.com/kubernetes-sigs/cluster-api/blob/master/controlplane/kubeadm/controllers/scale.go#L234 func selectMachineForScaleDown(ep *EtcdPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { machines := ep.Machines switch { case ep.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0: machines = ep.MachineWithDeleteAnnotation(outdatedMachines) case ep.MachineWithDeleteAnnotation(machines).Len() > 0: machines = ep.MachineWithDeleteAnnotation(machines) case outdatedMachines.Len() > 0: machines = outdatedMachines } return ep.MachineInFailureDomainWithMostMachines(machines) } // MachineWithDeleteAnnotation returns a machine that has been annotated with DeleteMachineAnnotation key. func (ep *EtcdPlane) MachineWithDeleteAnnotation(machines collections.Machines) collections.Machines { // See if there are any machines with DeleteMachineAnnotation key. annotatedMachines := machines.Filter(collections.HasAnnotationKey(clusterv1.DeleteMachineAnnotation)) // If there are, return list of annotated machines. return annotatedMachines } // All functions related to failureDomains follow the same logic as KCP's failureDomain implementation, to leverage existing methods // FailureDomainWithMostMachines returns a fd which has the most machines on it. func (ep *EtcdPlane) FailureDomainWithMostMachines(machines collections.Machines) *string { // See if there are any Machines that are not in currently defined failure domains first. 
notInFailureDomains := machines.Filter( collections.Not(collections.InFailureDomains(ep.FailureDomains().GetIDs()...)), ) if len(notInFailureDomains) > 0 { // return the failure domain for the oldest Machine not in the current list of failure domains // this could be either nil (no failure domain defined) or a failure domain that is no longer defined // in the cluster status. return notInFailureDomains.Oldest().Spec.FailureDomain } return failuredomains.PickMost(ep.Cluster.Status.FailureDomains, ep.Machines, machines) } // MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it. func (ep *EtcdPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) { fd := ep.FailureDomainWithMostMachines(machines) machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd)) machineToMark := machinesInFailureDomain.Oldest() if machineToMark == nil { return nil, errors.New("failed to pick control plane Machine to mark for deletion") } return machineToMark, nil } // NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines. func (ep *EtcdPlane) NextFailureDomainForScaleUp() *string { if len(ep.Cluster.Status.FailureDomains) == 0 { return nil } return failuredomains.PickFewest(ep.FailureDomains(), ep.UpToDateMachines()) } // FailureDomains returns a slice of failure domain objects synced from the infrastructure provider into Cluster.Status. func (ep *EtcdPlane) FailureDomains() clusterv1.FailureDomains { if ep.Cluster.Status.FailureDomains == nil { return clusterv1.FailureDomains{} } return ep.Cluster.Status.FailureDomains } // UpToDateMachines returns the machines that are up to date with the control // plane's configuration and therefore do not require rollout. func (ep *EtcdPlane) UpToDateMachines() collections.Machines { return ep.Machines.Difference(ep.MachinesNeedingRollout()) } func (ep *EtcdPlane) NewestUpToDateMachine() *clusterv1.Machine { upToDateMachines := ep.UpToDateMachines() return upToDateMachines.Newest() } // MachinesNeedingRollout return a list of machines that need to be rolled out. func (ep *EtcdPlane) MachinesNeedingRollout() collections.Machines { // Ignore machines to be deleted. machines := ep.Machines.Filter(collections.Not(collections.HasDeletionTimestamp)) // Return machines if they are scheduled for rollout or if with an outdated configuration. return machines.AnyFilter( //Machines that do not match with Etcdadm config. collections.Not(MatchesEtcdadmClusterConfiguration(ep.infraResources, ep.etcdadmConfigs, ep.EC)), ) } // MatchesEtcdadmClusterConfiguration returns a filter to find all machines that matches with EtcdadmCluster config and do not require any rollout. // Etcd version and extra params, and infrastructure template need to be equivalent. 
func MatchesEtcdadmClusterConfiguration(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*etcdbootstrapv1.EtcdadmConfig, ec *etcdv1.EtcdadmCluster) func(machine *clusterv1.Machine) bool { return collections.And( MatchesEtcdadmConfig(machineConfigs, ec), MatchesTemplateClonedFrom(infraConfigs, ec), ) } // MatchesEtcdadmConfig checks if machine's EtcdadmConfigSpec is equivalent with EtcdadmCluster's spec func MatchesEtcdadmConfig(machineConfigs map[string]*etcdbootstrapv1.EtcdadmConfig, ec *etcdv1.EtcdadmCluster) collections.Func { return func(machine *clusterv1.Machine) bool { if machine == nil { return false } etcdadmConfig, found := machineConfigs[machine.Name] if !found { // Return true here because failing to get EtcdadmConfig should not be considered as unmatching. // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. return true } ecConfig := ec.Spec.EtcdadmConfigSpec.DeepCopy() return reflect.DeepEqual(&etcdadmConfig.Spec, ecConfig) } } // MatchesTemplateClonedFrom returns a filter to find all machines that match a given EtcdadmCluster's infra template. func MatchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, ec *etcdv1.EtcdadmCluster) collections.Func { return func(machine *clusterv1.Machine) bool { if machine == nil { return false } infraObj, found := infraConfigs[machine.Name] if !found { // Return true here because failing to get infrastructure machine should not be considered as unmatching. return true } clonedFromName, ok1 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] clonedFromGroupKind, ok2 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] if !ok1 || !ok2 { // All etcdadmCluster cloned infra machines should have this annotation. // Missing the annotation may be due to older version machines or adopted machines. // Should not be considered as mismatch. return true } // Check if the machine's infrastructure reference has been created from the current etcdadmCluster infrastructure template. if clonedFromName != ec.Spec.InfrastructureTemplate.Name || clonedFromGroupKind != ec.Spec.InfrastructureTemplate.GroupVersionKind().GroupKind().String() { return false } return true } } // getInfraResources fetches the external infrastructure resource for each machine in the collection and returns a map of machine.Name -> infraResource. func getInfraResources(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*unstructured.Unstructured, error) { result := map[string]*unstructured.Unstructured{} for _, m := range machines { infraObj, err := external.Get(ctx, cl, &m.Spec.InfrastructureRef, m.Namespace) if err != nil { if apierrors.IsNotFound(errors.Cause(err)) { continue } return nil, errors.Wrapf(err, "failed to retrieve infra obj for machine %q", m.Name) } result[m.Name] = infraObj } return result, nil } // getEtcdadmConfigs fetches the etcdadm config for each machine in the collection and returns a map of machine.Name -> EtcdadmConfig. 
func getEtcdadmConfigs(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*etcdbootstrapv1.EtcdadmConfig, error) { result := map[string]*etcdbootstrapv1.EtcdadmConfig{} for _, m := range machines { bootstrapRef := m.Spec.Bootstrap.ConfigRef if bootstrapRef == nil { continue } machineConfig := &etcdbootstrapv1.EtcdadmConfig{} if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, machineConfig); err != nil { if apierrors.IsNotFound(errors.Cause(err)) { continue } return nil, errors.Wrapf(err, "failed to retrieve bootstrap config for machine %q", m.Name) } result[m.Name] = machineConfig } return result, nil }
237
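MatchesTemplateClonedFrom above relies on the clone annotations that CAPI stamps onto infrastructure objects created from a template. The test-style sketch below is hypothetical (assumed to live in the same controllers package, with testing, corev1, metav1, unstructured, clusterv1, and etcdv1 imported); it shows the annotation values that count as a match for an EtcdadmCluster whose template is InfrastructureTemplate/testInfraTemplate in group infra.io.

func TestMatchesTemplateClonedFromSketch(t *testing.T) {
    ec := &etcdv1.EtcdadmCluster{
        Spec: etcdv1.EtcdadmClusterSpec{
            InfrastructureTemplate: corev1.ObjectReference{
                APIVersion: "infra.io/v1",
                Kind:       "InfrastructureTemplate",
                Name:       "testInfraTemplate",
            },
        },
    }

    // The clone annotations record which template the infra object was created from:
    // the template name and its GroupKind rendered as "Kind.group".
    infraObj := &unstructured.Unstructured{}
    infraObj.SetAnnotations(map[string]string{
        clusterv1.TemplateClonedFromNameAnnotation:      "testInfraTemplate",
        clusterv1.TemplateClonedFromGroupKindAnnotation: "InfrastructureTemplate.infra.io",
    })

    machine := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}}
    match := MatchesTemplateClonedFrom(map[string]*unstructured.Unstructured{"m1": infraObj}, ec)
    if !match(machine) {
        t.Fatal("expected machine m1 to match the current infrastructure template")
    }
}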
etcdadm-controller
aws
Go
package controllers

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "encoding/json"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/url"
    "time"

    "github.com/pkg/errors"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

const (
    httpClientTimeout = 10 * time.Second
    portCheckTimeout  = 2 * time.Second
)

type etcdHealthCheckResponse struct {
    Health string `json:"health"`
}

type portNotOpenError struct{}

func (h *portNotOpenError) Error() string {
    return "etcd endpoint port is not open"
}

var portNotOpenErr = &portNotOpenError{}

func (r *EtcdadmClusterReconciler) performEndpointHealthCheck(ctx context.Context, cluster *clusterv1.Cluster, endpoint string, logLevelInfo bool) error {
    client, err := r.getEtcdHttpClient(ctx, cluster)
    if err != nil {
        return err
    }

    u, err := url.Parse(endpoint)
    if err != nil {
        return errors.Wrapf(err, "invalid etcd endpoint url")
    }
    if !isPortOpen(ctx, u.Host) {
        return portNotOpenErr
    }

    healthCheckURL := getMemberHealthCheckEndpoint(endpoint)
    if logLevelInfo {
        // logging non-failures only for non-periodic checks so as to not log too many events
        r.Log.Info("Performing healthcheck on", "endpoint", healthCheckURL)
    }

    req, err := http.NewRequest("GET", healthCheckURL, nil)
    if err != nil {
        return errors.Wrap(err, "error creating healthcheck request")
    }
    resp, err := client.Do(req)
    if err != nil {
        return errors.Wrap(err, "error checking etcd member health")
    }
    // reuse connection
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return err
    }
    if resp.StatusCode != http.StatusOK {
        // err is nil at this point, so wrapping it would silently return nil and mark the
        // member healthy; return an explicit error carrying the unexpected status code instead.
        return errors.Errorf("etcd member not ready, healthcheck returned status code %d", resp.StatusCode)
    }
    if err := parseEtcdHealthCheckOutput(body); err != nil {
        return errors.Wrapf(err, "etcd member %v failed healthcheck", endpoint)
    }
    if logLevelInfo {
        r.Log.Info("Etcd member ready", "member", endpoint)
    }
    return nil
}

func parseEtcdHealthCheckOutput(data []byte) error {
    obj := etcdHealthCheckResponse{}
    if err := json.Unmarshal(data, &obj); err != nil {
        return err
    }
    if obj.Health == "true" {
        return nil
    }
    return fmt.Errorf("/health returned %q", obj.Health)
}

func (r *EtcdadmClusterReconciler) getEtcdHttpClient(ctx context.Context, cluster *clusterv1.Cluster) (*http.Client, error) {
    httpClientVal, httpClientExists := r.etcdHealthCheckConfig.clusterToHttpClient.Load(cluster.UID)
    if httpClientExists {
        httpClient, ok := httpClientVal.(*http.Client)
        if ok {
            return httpClient, nil
        }
    }

    caCertPool := x509.NewCertPool()
    caCert, err := r.getCACert(ctx, cluster)
    if err != nil {
        return nil, err
    }
    caCertPool.AppendCertsFromPEM(caCert)

    clientCert, err := r.getClientCerts(ctx, cluster)
    if err != nil {
        return nil, errors.Wrap(err, "Error getting client cert for healthcheck")
    }

    etcdHttpClient := &http.Client{
        Timeout: httpClientTimeout,
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{
                RootCAs:      caCertPool,
                Certificates: []tls.Certificate{clientCert},
            },
        },
    }
    r.etcdHealthCheckConfig.clusterToHttpClient.Store(cluster.UID, etcdHttpClient)
    return etcdHttpClient, nil
}

func isPortOpen(ctx context.Context, endpoint string) bool {
    conn, err := net.DialTimeout("tcp", endpoint, portCheckTimeout)
    if err != nil {
        return false
    }
    if conn != nil {
        conn.Close()
        return true
    }
    return false
}
145
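A small, hypothetical test for the health-response parsing above (same controllers package and a standard testing import assumed). It only pins down the /health contract the code relies on: a JSON body whose health field must be the string "true".

func TestParseEtcdHealthCheckOutputSketch(t *testing.T) {
    // etcd's /health endpoint returns a JSON body such as {"health":"true"}.
    if err := parseEtcdHealthCheckOutput([]byte(`{"health":"true"}`)); err != nil {
        t.Fatalf("expected healthy response to pass: %v", err)
    }
    if err := parseEtcdHealthCheckOutput([]byte(`{"health":"false"}`)); err == nil {
        t.Fatal("expected unhealthy response to be reported as an error")
    }
    if err := parseEtcdHealthCheckOutput([]byte(`not-json`)); err == nil {
        t.Fatal("expected malformed response to be reported as an error")
    }
}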
etcdadm-controller
aws
Go
package controllers import ( "context" "fmt" "net" "net/url" "strings" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/pkg/errors" "go.etcd.io/etcd/api/v3/etcdserverpb" clientv3 "go.etcd.io/etcd/client/v3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/apiserver/pkg/storage/names" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" ) const ( httpsPrefix = "https://" etcdClientURLPort = "2379" ) // EtcdMachinesSelectorForCluster returns the label selector necessary to get etcd machines for a given cluster. func EtcdMachinesSelectorForCluster(clusterName, etcdClusterName string) labels.Selector { must := func(r *labels.Requirement, err error) labels.Requirement { if err != nil { panic(err) } return *r } return labels.NewSelector().Add( must(labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Equals, []string{clusterName})), must(labels.NewRequirement(clusterv1.MachineEtcdClusterLabelName, selection.Equals, []string{etcdClusterName})), ) } // EtcdClusterMachines returns a filter to find all etcd machines for a cluster, regardless of ownership. func EtcdClusterMachines(clusterName, etcdClusterName string) func(machine *clusterv1.Machine) bool { selector := EtcdMachinesSelectorForCluster(clusterName, etcdClusterName) return func(machine *clusterv1.Machine) bool { if machine == nil { return false } return selector.Matches(labels.Set(machine.Labels)) } } // ControlPlaneLabelsForCluster returns a set of labels to add to a control plane machine for this specific cluster. 
func EtcdLabelsForCluster(clusterName string, etcdClusterName string) map[string]string { return map[string]string{ clusterv1.ClusterNameLabel: clusterName, clusterv1.MachineEtcdClusterLabelName: etcdClusterName, } } func (r *EtcdadmClusterReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, failureDomain *string) (ctrl.Result, error) { // Since the cloned resource should eventually have a controller ref for the Machine, we create an // OwnerReference here without the Controller field set infraCloneOwner := &metav1.OwnerReference{ APIVersion: etcdv1.GroupVersion.String(), Kind: "EtcdadmCluster", Name: ec.Name, UID: ec.UID, } // Clone the infrastructure template infraRef, err := external.CreateFromTemplate(ctx, &external.CreateFromTemplateInput{ Client: r.Client, TemplateRef: &ec.Spec.InfrastructureTemplate, Namespace: ec.Namespace, OwnerRef: infraCloneOwner, ClusterName: cluster.Name, Labels: EtcdLabelsForCluster(cluster.Name, ec.Name), }) if err != nil { return ctrl.Result{}, fmt.Errorf("error cloning infrastructure template for etcd machine: %v", err) } if infraRef == nil { return ctrl.Result{}, fmt.Errorf("infrastructure template could not be cloned for etcd machine") } bootstrapRef, err := r.generateEtcdadmConfig(ctx, ec, cluster) if err != nil { return ctrl.Result{}, err } if err := r.generateMachine(ctx, ec, cluster, infraRef, bootstrapRef, failureDomain); err != nil { r.Log.Error(err, "Failed to create initial etcd machine") return ctrl.Result{}, err } return ctrl.Result{}, nil } func (r *EtcdadmClusterReconciler) generateEtcdadmConfig(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster) (*corev1.ObjectReference, error) { owner := metav1.OwnerReference{ APIVersion: etcdv1.GroupVersion.String(), Kind: "EtcdadmCluster", Name: ec.Name, UID: ec.UID, } bootstrapConfig := &etcdbootstrapv1.EtcdadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.GenerateName(ec.Name + "-"), Namespace: ec.Namespace, Labels: EtcdLabelsForCluster(cluster.Name, ec.Name), OwnerReferences: []metav1.OwnerReference{owner}, }, Spec: ec.Spec.EtcdadmConfigSpec, } bootstrapRef := &corev1.ObjectReference{ APIVersion: etcdbootstrapv1.GroupVersion.String(), Kind: "EtcdadmConfig", Name: bootstrapConfig.GetName(), Namespace: bootstrapConfig.GetNamespace(), UID: bootstrapConfig.GetUID(), } if err := r.Client.Create(ctx, bootstrapConfig); err != nil { return nil, errors.Wrap(err, "Failed to create etcdadm bootstrap configuration") } return bootstrapRef, nil } func (r *EtcdadmClusterReconciler) generateMachine(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, infraRef, bootstrapRef *corev1.ObjectReference, failureDomain *string) error { machine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.GenerateName(ec.Name + "-"), Namespace: ec.Namespace, Labels: EtcdLabelsForCluster(cluster.Name, ec.Name), OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(ec, etcdv1.GroupVersion.WithKind("EtcdadmCluster")), }, }, Spec: clusterv1.MachineSpec{ ClusterName: cluster.Name, InfrastructureRef: *infraRef, Bootstrap: clusterv1.Bootstrap{ ConfigRef: bootstrapRef, }, FailureDomain: failureDomain, }, } if err := r.Client.Create(ctx, machine); err != nil { return errors.Wrap(err, "failed to create machine") } return nil } func getEtcdMachineAddress(machine *clusterv1.Machine) string { var foundAddress bool var machineAddress string for _, address := range 
machine.Status.Addresses { if address.Type == clusterv1.MachineInternalIP || address.Type == clusterv1.MachineInternalDNS { machineAddress = address.Address foundAddress = true break } } for _, address := range machine.Status.Addresses { if !foundAddress { if address.Type == clusterv1.MachineExternalIP || address.Type == clusterv1.MachineExternalDNS { machineAddress = address.Address break } } } return machineAddress } func getMemberClientURL(address string) string { return fmt.Sprintf("%s%s:%s", httpsPrefix, address, etcdClientURLPort) } func getEtcdMachineAddressFromClientURL(clientURL string) string { u, err := url.ParseRequestURI(clientURL) if err != nil { return "" } host, _, err := net.SplitHostPort(u.Host) if err != nil { return "" } return host } func getMemberHealthCheckEndpoint(clientURL string) string { return fmt.Sprintf("%s/health", clientURL) } // source: https://github.com/kubernetes-sigs/etcdadm/blob/master/etcd/etcd.go#L53:6 func memberForPeerURLs(members *clientv3.MemberListResponse, peerURLs []string) (*etcdserverpb.Member, bool) { for _, m := range members.Members { if stringSlicesEqual(m.PeerURLs, peerURLs) { return m, true } } return nil, false } // stringSlicesEqual compares two string slices for equality func stringSlicesEqual(l, r []string) bool { if len(l) != len(r) { return false } for i := range l { if l[i] != r[i] { return false } } return true } // Logic & implementation similar to KCP controller reconciling external MachineTemplate InfrastrucutureReference https://github.com/kubernetes-sigs/cluster-api/blob/master/controlplane/kubeadm/controllers/helpers.go#L123:41 func (r *EtcdadmClusterReconciler) reconcileExternalReference(ctx context.Context, cluster *clusterv1.Cluster, ref corev1.ObjectReference) error { if !strings.HasSuffix(ref.Kind, clusterv1.TemplateSuffix) { return nil } obj, err := external.Get(ctx, r.Client, &ref, cluster.Namespace) if err != nil { return err } // Note: We intentionally do not handle checking for the paused label on an external template reference patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { return err } obj.SetOwnerReferences(util.EnsureOwnerRef(obj.GetOwnerReferences(), metav1.OwnerReference{ APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: cluster.Name, UID: cluster.UID, })) return patchHelper.Patch(ctx, obj) }
251
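The URL helpers in the file above (getMemberClientURL, getEtcdMachineAddressFromClientURL, getMemberHealthCheckEndpoint) do a simple round trip between a machine address and its etcd client URL. The standalone sketch below re-implements that round trip locally, since the controller's helpers are unexported; the memberClientURL/addressFromClientURL names and the sample address are illustrative only.

package main

import (
	"fmt"
	"net"
	"net/url"
)

// memberClientURL mirrors getMemberClientURL: https://<address>:2379.
func memberClientURL(address string) string {
	return fmt.Sprintf("https://%s:%s", address, "2379")
}

// addressFromClientURL mirrors getEtcdMachineAddressFromClientURL:
// parse the URL, strip the port, and return "" on any parse error.
func addressFromClientURL(clientURL string) string {
	u, err := url.ParseRequestURI(clientURL)
	if err != nil {
		return ""
	}
	host, _, err := net.SplitHostPort(u.Host)
	if err != nil {
		return ""
	}
	return host
}

func main() {
	clientURL := memberClientURL("10.0.0.12")
	fmt.Println(clientURL)                       // https://10.0.0.12:2379
	fmt.Println(clientURL + "/health")           // health check endpoint
	fmt.Println(addressFromClientURL(clientURL)) // 10.0.0.12
}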
etcdadm-controller
aws
Go
package controllers import ( "context" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/go-logr/logr" "github.com/pkg/errors" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client" ) // TODO(g-gaston): remove this once we have a stable CAPI repo that contains this, // MachineEtcdReadyLabelName is the label set on machines that have succesfully joined the etcd cluster. const MachineEtcdReadyLabelName = "cluster.x-k8s.io/etcd-ready" type etcdMachines map[string]etcdMachine // endpoints returns all the API endpoints for the machines that have one available. func (e etcdMachines) endpoints() []string { endpoints := make([]string, 0, len(e)) for _, m := range e { if m.endpoint != "" { endpoints = append(endpoints, m.endpoint) } } return endpoints } // etcdMachine represents a Machine that should be a member of an etcd cluster. type etcdMachine struct { *clusterv1.Machine endpoint string listening bool healthError error } func (e etcdMachine) healthy() bool { return e.listening && e.healthError == nil } // updateMachinesEtcdReadyLabel adds the etcd-ready label to the machines that have joined the etcd cluster. func (r *EtcdadmClusterReconciler) updateMachinesEtcdReadyLabel(ctx context.Context, log logr.Logger, machines etcdMachines) error { for _, m := range machines { if _, ok := m.Labels[MachineEtcdReadyLabelName]; ok { continue } if !m.healthy() { log.Info("Machine not healthy yet", "machine", klog.KObj(m.Machine), "listening", m.listening, "healthError", m.healthError, "endpoint", m.endpoint) continue } m.Labels[MachineEtcdReadyLabelName] = "true" if err := r.Client.Update(ctx, m.Machine); err != nil { return errors.Wrapf(err, "adding etcd ready label to machine %s", m.Name) } } return nil } // checkOwnedMachines verifies the health of all etcd members. func (r *EtcdadmClusterReconciler) checkOwnedMachines(ctx context.Context, log logr.Logger, etcdadmCluster *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster) (etcdMachines, error) { ownedMachines, err := r.getCurrentOwnedMachines(ctx, etcdadmCluster, cluster) if err != nil { return nil, err } machines := make(etcdMachines, len(ownedMachines)) for k, machine := range ownedMachines { m := etcdMachine{Machine: machine} endpoint := getMachineEtcdEndpoint(machine) if endpoint == "" { machines[k] = m continue } err := r.performEndpointHealthCheck(ctx, cluster, endpoint, true) // This is not ideal, performEndpointHealthCheck uses an error to signal both a not ready/unhealthy member // and also transient errors when performing such check. // Ideally we would separate these 2 so we can abort on error and mark as unhealthy separetly m.healthError = err if errors.Is(err, portNotOpenErr) { log.Info("Machine is not listening yet, this is probably transient, while etcd starts", "endpoint", endpoint) } else { m.endpoint = endpoint m.listening = true } machines[k] = m } return machines, nil } // getCurrentOwnedMachines lists all the owned machines by the etcdadm cluster. func (r *EtcdadmClusterReconciler) getCurrentOwnedMachines(ctx context.Context, etcdadmCluster *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster) (collections.Machines, error) { var client client.Reader if conditions.IsFalse(etcdadmCluster, etcdv1.EtcdMachinesSpecUpToDateCondition) { // During upgrade with current logic, outdated machines don't get deleted right away. 
// the controller only removes their EtcdadmCluster ownerRef and updates the Machine, so the uncached client is used here to pick up those changes client = r.uncachedClient } else { client = r.Client } etcdMachines, err := collections.GetFilteredMachinesForCluster(ctx, client, cluster, EtcdClusterMachines(cluster.Name, etcdadmCluster.Name)) if err != nil { return nil, errors.Wrap(err, "reading machines for etcd cluster") } ownedMachines := etcdMachines.Filter(collections.OwnedMachines(etcdadmCluster)) return ownedMachines, nil } // getMachineEtcdEndpoint constructs the full client URL for an etcd member Machine. // If the Machine does not have an address yet, it returns an empty string. func getMachineEtcdEndpoint(machine *clusterv1.Machine) string { address := getEtcdMachineAddress(machine) if address == "" { return "" } return getMemberClientURL(address) }
130
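The etcdMachine type above classifies a member as healthy only when it is both listening and returned no health error, while endpoints() collects every member that already has an endpoint regardless of health. Below is a minimal, self-contained sketch of that classification using a simplified stand-in struct (member) instead of the real type, which embeds clusterv1.Machine.

package main

import (
	"errors"
	"fmt"
)

// member is a simplified stand-in for the controller's etcdMachine type:
// an endpoint plus the two signals that healthy() combines.
type member struct {
	endpoint    string
	listening   bool
	healthError error
}

func (m member) healthy() bool { return m.listening && m.healthError == nil }

func main() {
	members := map[string]member{
		"m1": {endpoint: "https://10.0.0.1:2379", listening: true},
		"m2": {endpoint: "https://10.0.0.2:2379", listening: true, healthError: errors.New("etcdserver: unhealthy")},
		"m3": {listening: false}, // no address yet, endpoint left empty
	}

	// endpoints() only collects members that already have an endpoint,
	// regardless of health; healthy() gates the etcd-ready label instead.
	var endpoints []string
	for name, m := range members {
		if m.endpoint != "" {
			endpoints = append(endpoints, m.endpoint)
		}
		fmt.Printf("%s healthy=%v\n", name, m.healthy())
	}
	fmt.Println("endpoints:", endpoints)
}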
etcdadm-controller
aws
Go
package controllers import ( "context" "strings" "sync" "time" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" ) const ( maxUnhealthyCount = 5 healthCheckInterval = 30 ) type etcdHealthCheckConfig struct { clusterToHttpClient sync.Map } type etcdadmClusterMemberHealthConfig struct { unhealthyMembersFrequency map[string]int unhealthyMembersToRemove map[string]*clusterv1.Machine endpointToMachineMapper map[string]*clusterv1.Machine cluster *clusterv1.Cluster endpoints string ownedMachines collections.Machines } func (r *EtcdadmClusterReconciler) startHealthCheckLoop(ctx context.Context, done <-chan struct{}) { r.Log.Info("Starting periodic healthcheck loop") etcdadmClusterMapper := make(map[types.UID]etcdadmClusterMemberHealthConfig) ticker := time.NewTicker(healthCheckInterval * time.Second) defer ticker.Stop() for { select { case <-done: return case <-ticker.C: etcdClusters := &etcdv1.EtcdadmClusterList{} err := r.Client.List(ctx, etcdClusters) if err != nil { r.Log.Error(err, "Error listing etcdadm cluster objects") continue } for _, ec := range etcdClusters.Items { log := r.Log.WithValues("EtcdadmCluster", klog.KObj(&ec)) if annotations.HasPaused(&ec) { log.Info("EtcdadmCluster reconciliation is paused, skipping health checks") continue } if conditions.IsFalse(&ec, etcdv1.EtcdCertificatesAvailableCondition) { log.Info("EtcdadmCluster certificates are not ready, skipping health checks") continue } if !ec.Status.CreationComplete { // etcdCluster not fully provisioned yet log.Info("EtcdadmCluster is not ready, skipping health checks") continue } if conditions.IsFalse(&ec, etcdv1.EtcdMachinesSpecUpToDateCondition) { // etcdCluster is undergoing upgrade, some machines might not be ready yet, skip periodic healthcheck log.Info("EtcdadmCluster machine specs are not up to date, skipping health checks") continue } var cluster *clusterv1.Cluster if clusterEntry, ok := etcdadmClusterMapper[ec.UID]; !ok { cluster, err = util.GetOwnerCluster(ctx, r.Client, ec.ObjectMeta) if err != nil { log.Error(err, "Failed to retrieve owner Cluster from the API Server") continue } if cluster == nil { log.Info("Cluster Controller has not yet set OwnerRef on etcd cluster") continue } ownedMachines := r.getOwnedMachines(ctx, cluster, ec) endpointToMachineMapper := r.createEndpointToMachinesMap(ownedMachines) etcdadmClusterMapper[ec.UID] = etcdadmClusterMemberHealthConfig{ unhealthyMembersFrequency: make(map[string]int), unhealthyMembersToRemove: make(map[string]*clusterv1.Machine), endpointToMachineMapper: endpointToMachineMapper, cluster: cluster, ownedMachines: ownedMachines, } } else { cluster = clusterEntry.cluster if ec.Status.Endpoints != clusterEntry.endpoints { clusterEntry.endpoints = ec.Status.Endpoints ownedMachines := r.getOwnedMachines(ctx, cluster, ec) clusterEntry.ownedMachines = ownedMachines clusterEntry.endpointToMachineMapper = r.createEndpointToMachinesMap(ownedMachines) etcdadmClusterMapper[ec.UID] = clusterEntry } } if err := r.periodicEtcdMembersHealthCheck(ctx, cluster, &ec, etcdadmClusterMapper); err != nil { log.Error(err, "Error performing healthcheck") continue } } } } } func (r *EtcdadmClusterReconciler) periodicEtcdMembersHealthCheck(ctx context.Context, cluster *clusterv1.Cluster, etcdCluster 
*etcdv1.EtcdadmCluster, etcdadmClusterMapper map[types.UID]etcdadmClusterMemberHealthConfig) error { log := r.Log.WithValues("EtcdadmCluster", klog.KObj(etcdCluster)) if len(etcdCluster.Status.Endpoints) == 0 { log.Info("Skipping healthcheck because Endpoints are empty", "Endpoints", etcdCluster.Status.Endpoints) return nil } currClusterHFConfig := etcdadmClusterMapper[etcdCluster.UID] endpoints := strings.Split(etcdCluster.Status.Endpoints, ",") for _, endpoint := range endpoints { err := r.performEndpointHealthCheck(ctx, cluster, endpoint, false) if err != nil { // member failed healthcheck so add it to unhealthy map or update it's unhealthy count log.Info("Member failed healthcheck, adding to unhealthy members list", "member", endpoint) currClusterHFConfig.unhealthyMembersFrequency[endpoint]++ // if machine corresponding to the member does not exist, remove that member without waiting for max unhealthy count to be reached m, ok := currClusterHFConfig.endpointToMachineMapper[endpoint] if !ok || m == nil { log.Info("Machine for member does not exist", "member", endpoint) currClusterHFConfig.unhealthyMembersToRemove[endpoint] = m } if currClusterHFConfig.unhealthyMembersFrequency[endpoint] >= maxUnhealthyCount { log.Info("Adding to list of unhealthy members to remove", "member", endpoint) // member has been unresponsive, add the machine to unhealthyMembersToRemove queue m := currClusterHFConfig.endpointToMachineMapper[endpoint] currClusterHFConfig.unhealthyMembersToRemove[endpoint] = m } } else { // member passed healthcheck. so if it was previously added to unhealthy map, remove it since only consecutive failures should lead to member removal _, markedUnhealthy := currClusterHFConfig.unhealthyMembersFrequency[endpoint] if markedUnhealthy { delete(currClusterHFConfig.unhealthyMembersFrequency, endpoint) } } } if len(currClusterHFConfig.unhealthyMembersToRemove) == 0 { return nil } finalEndpoints := make([]string, 0, len(endpoints)) for _, endpoint := range endpoints { if _, existsInUnhealthyMap := currClusterHFConfig.unhealthyMembersToRemove[endpoint]; !existsInUnhealthyMap { finalEndpoints = append(finalEndpoints, endpoint) } } var retErr error for machineEndpoint, machineToDelete := range currClusterHFConfig.unhealthyMembersToRemove { if err := r.removeEtcdMachine(ctx, etcdCluster, cluster, machineToDelete, getEtcdMachineAddressFromClientURL(machineEndpoint)); err != nil { // log and save error and continue deletion of other members, deletion of this member will be retried since it's still part of unhealthyMembersToRemove log.Error(err, "error removing etcd member machine", "member", machineToDelete.Name, "endpoint", machineEndpoint) retErr = multierror.Append(retErr, err) continue } delete(currClusterHFConfig.unhealthyMembersToRemove, machineEndpoint) } if retErr != nil { return retErr } etcdCluster.Status.Endpoints = strings.Join(finalEndpoints, ",") etcdCluster.Status.Ready = false return r.Client.Status().Update(ctx, etcdCluster) } func (r *EtcdadmClusterReconciler) createEndpointToMachinesMap(ownedMachines collections.Machines) map[string]*clusterv1.Machine { endpointToMachineMapper := make(map[string]*clusterv1.Machine) for _, m := range ownedMachines { machineClientURL := getMemberClientURL(getEtcdMachineAddress(m)) endpointToMachineMapper[machineClientURL] = m } return endpointToMachineMapper } func (r *EtcdadmClusterReconciler) getOwnedMachines(ctx context.Context, cluster *clusterv1.Cluster, ec etcdv1.EtcdadmCluster) collections.Machines { etcdMachines, err := 
collections.GetFilteredMachinesForCluster(ctx, r.uncachedClient, cluster, EtcdClusterMachines(cluster.Name, ec.Name)) if err != nil { r.Log.Error(err, "Error filtering machines for etcd cluster") } return etcdMachines.Filter(collections.OwnedMachines(&ec)) }
200
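The health-check loop above only queues a member for removal after maxUnhealthyCount consecutive failed checks, and a single successful check clears the counter so that only consecutive failures count. A small stdlib-only sketch of that bookkeeping follows; trackHealth and the hard-coded endpoints are hypothetical stand-ins for performEndpointHealthCheck and the real endpoint list.

package main

import "fmt"

const maxUnhealthyCount = 5

// trackHealth applies one round of health results to the per-endpoint
// failure counters and returns the endpoints that crossed the threshold.
func trackHealth(results map[string]bool, failures map[string]int) []string {
	var toRemove []string
	for endpoint, healthy := range results {
		if healthy {
			delete(failures, endpoint) // only consecutive failures count
			continue
		}
		failures[endpoint]++
		if failures[endpoint] >= maxUnhealthyCount {
			toRemove = append(toRemove, endpoint)
		}
	}
	return toRemove
}

func main() {
	failures := map[string]int{}
	for round := 1; round <= 6; round++ {
		results := map[string]bool{
			"https://10.0.0.1:2379": true,
			"https://10.0.0.2:2379": false, // persistently failing member
		}
		remove := trackHealth(results, failures)
		fmt.Printf("round %d: failures=%v remove=%v\n", round, failures, remove)
	}
}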
etcdadm-controller
aws
Go
package controllers import ( "context" "crypto/tls" "crypto/x509" "fmt" "strings" "time" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/pkg/errors" clientv3 "go.etcd.io/etcd/client/v3" apierrors "k8s.io/apimachinery/pkg/api/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/etcdadm/constants" ) const etcdClientTimeout = 5 * time.Second func (r *EtcdadmClusterReconciler) intializeEtcdCluster(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, ep *EtcdPlane) (ctrl.Result, error) { if err := r.generateCAandClientCertSecrets(ctx, cluster, ec); err != nil { r.Log.Error(err, "error generating etcd CA certs") return ctrl.Result{}, err } conditions.MarkTrue(ec, etcdv1.EtcdCertificatesAvailableCondition) fd := ep.NextFailureDomainForScaleUp() return r.cloneConfigsAndGenerateMachine(ctx, ec, cluster, fd) } func (r *EtcdadmClusterReconciler) scaleUpEtcdCluster(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, ep *EtcdPlane) (ctrl.Result, error) { fd := ep.NextFailureDomainForScaleUp() return r.cloneConfigsAndGenerateMachine(ctx, ec, cluster, fd) } func (r *EtcdadmClusterReconciler) scaleDownEtcdCluster(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, ep *EtcdPlane, outdatedMachines collections.Machines) (ctrl.Result, error) { // Pick the Machine that we should scale down. machineToDelete, err := selectMachineForScaleDown(ep, outdatedMachines) if err != nil || machineToDelete == nil { return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") } machineAddress := getEtcdMachineAddress(machineToDelete) return ctrl.Result{}, r.removeEtcdMachine(ctx, ec, cluster, machineToDelete, machineAddress) } func (r *EtcdadmClusterReconciler) removeEtcdMachine(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, machineToDelete *clusterv1.Machine, machineAddress string) error { peerURL := fmt.Sprintf("https://%s:2380", machineAddress) etcdClient, err := r.generateEtcdClient(ctx, cluster, ec.Status.Endpoints) if err != nil { return fmt.Errorf("error creating etcd client, err: %v", err) } if etcdClient == nil { return fmt.Errorf("could not create etcd client") } return r.removeEtcdMemberAndDeleteMachine(ctx, etcdClient, peerURL, machineToDelete) } func (r *EtcdadmClusterReconciler) generateEtcdClient(ctx context.Context, cluster *clusterv1.Cluster, endpoints string) (*clientv3.Client, error) { caCertPool := x509.NewCertPool() caCert, err := r.getCACert(ctx, cluster) if err != nil { return nil, err } caCertPool.AppendCertsFromPEM(caCert) clientCert, err := r.getClientCerts(ctx, cluster) if err != nil { return nil, errors.Wrap(err, "error getting client cert for healthcheck") } etcdClient, err := clientv3.New(clientv3.Config{ Endpoints: strings.Split(endpoints, ","), DialTimeout: etcdClientTimeout, TLS: &tls.Config{ RootCAs: caCertPool, Certificates: []tls.Certificate{clientCert}, }, }) return etcdClient, err } func (r *EtcdadmClusterReconciler) removeEtcdMemberAndDeleteMachine(ctx context.Context, etcdClient *clientv3.Client, peerURL string, machineToDelete *clusterv1.Machine) error { log := r.Log // Etcdadm has a "reset" command to remove an etcd member. But we can't run that command on the CAPI machine object after it's provisioned. 
// so the following logic is based on how etcdadm performs "reset" https://github.com/kubernetes-sigs/etcdadm/blob/master/cmd/reset.go#L65 etcdCtx, cancel := context.WithTimeout(ctx, constants.DefaultEtcdRequestTimeout) mresp, err := etcdClient.MemberList(etcdCtx) cancel() if err != nil { return fmt.Errorf("error listing members: %v", err) } localMember, ok := memberForPeerURLs(mresp, []string{peerURL}) if ok { if len(mresp.Members) > 1 { log.Info("Removing", "member", localMember.Name) etcdCtx, cancel = context.WithTimeout(ctx, constants.DefaultEtcdRequestTimeout) _, err = etcdClient.MemberRemove(etcdCtx, localMember.ID) cancel() if err != nil { return fmt.Errorf("failed to remove etcd member %s with error %v", localMember.Name, err) } if machineToDelete != nil { if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) && !apierrors.IsGone(err) { return fmt.Errorf("failed to delete etcd machine %s with error %v", machineToDelete.Name, err) } } } else { log.Info("Not removing last member in the cluster", "member", localMember.Name) } } else { log.Info("Member was removed") if machineToDelete != nil { // this could happen if the etcd member was removed through etcdctl calls, ensure that the machine gets deleted too if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) && !apierrors.IsGone(err) { return fmt.Errorf("failed to delete etcd machine %s with error %v", machineToDelete.Name, err) } } } return nil }
125
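removeEtcdMemberAndDeleteMachine above mirrors etcdadm's reset flow: list the members, match on peer URL, and only call MemberRemove when more than one member remains. The sketch below exercises the same two clientv3 calls directly against a hypothetical cluster; the endpoint and peer URL are placeholders and TLS setup is omitted, whereas the controller always supplies a CA pool and client certificate.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Placeholder endpoint; the controller builds this list from EtcdadmCluster status.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"https://10.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
		// TLS omitted for brevity; the controller always sets RootCAs and a client cert.
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	peerURL := "https://10.0.0.2:2380" // member being scaled down

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	members, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members.Members {
		for _, u := range m.PeerURLs {
			if u == peerURL && len(members.Members) > 1 {
				// Never remove the last member; the controller guards this too.
				if _, err := cli.MemberRemove(ctx, m.ID); err != nil {
					log.Fatal(err)
				}
				fmt.Println("removed member", m.Name)
				return
			}
		}
	}
	fmt.Println("member already removed")
}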
etcdadm-controller
aws
Go
package controllers import ( "context" "sort" "strings" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client" ) func (r *EtcdadmClusterReconciler) updateStatus(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, ownedMachines etcdMachines) error { log := r.Log.WithName(ec.Name) selector := EtcdMachinesSelectorForCluster(cluster.Name, ec.Name) // Copy label selector to its status counterpart in string format. // This is necessary for CRDs including scale subresources. ec.Status.Selector = selector.String() machines := make([]*clusterv1.Machine, 0, len(ownedMachines)) for _, machine := range ownedMachines { machines = append(machines, machine.Machine) } log.Info("Following machines owned by this etcd cluster", "machines", klog.KObjSlice(machines)) desiredReplicas := *ec.Spec.Replicas ec.Status.ReadyReplicas = int32(len(ownedMachines)) if !ec.DeletionTimestamp.IsZero() { return nil } readyReplicas := ec.Status.ReadyReplicas if readyReplicas < desiredReplicas { conditions.MarkFalse(ec, etcdv1.EtcdClusterResizeCompleted, etcdv1.EtcdScaleUpInProgressReason, clusterv1.ConditionSeverityWarning, "Scaling up etcd cluster to %d replicas (actual %d)", desiredReplicas, readyReplicas) return nil } if readyReplicas > desiredReplicas { conditions.MarkFalse(ec, etcdv1.EtcdClusterResizeCompleted, etcdv1.EtcdScaleDownInProgressReason, clusterv1.ConditionSeverityWarning, "Scaling down etcd cluster to %d replicas (actual %d)", desiredReplicas, readyReplicas) return nil } conditions.MarkTrue(ec, etcdv1.EtcdClusterResizeCompleted) for _, m := range ownedMachines { if !m.healthy() { if m.listening { // The machine is listening but not ready/unhealthy ec.Status.Ready = false return m.healthError } else { // The machine is not listening, probably transient while etcd starts return nil } } } // etcd ready when all machines have address set ec.Status.Ready = true conditions.MarkTrue(ec, etcdv1.EtcdEndpointsAvailable) endpoints := ownedMachines.endpoints() sort.Strings(endpoints) currEndpoints := strings.Join(endpoints, ",") log.Info("Comparing current and previous endpoints") // Checking if endpoints have changed. This avoids unnecessary client calls // to get and update the Secret containing the endpoints if ec.Status.Endpoints != currEndpoints { log.Info("Updating endpoints annotation, and the Secret containing etcdadm join address") ec.Status.Endpoints = currEndpoints secretNameNs := client.ObjectKey{Name: ec.Status.InitMachineAddress, Namespace: cluster.Namespace} secretInitAddress := &corev1.Secret{} if err := r.Client.Get(ctx, secretNameNs, secretInitAddress); err != nil { return err } if len(endpoints) > 0 { secretInitAddress.Data["address"] = []byte(getEtcdMachineAddressFromClientURL(endpoints[0])) } else { secretInitAddress.Data["address"] = []byte("") } secretInitAddress.Data["clientUrls"] = []byte(ec.Status.Endpoints) r.Log.Info("Updating init secret with endpoints") if err := r.Client.Update(ctx, secretInitAddress); err != nil { return err } } // set creationComplete to true, this is only set once after the first set of endpoints are ready and never unset, to indicate that the cluster has been created ec.Status.CreationComplete = true return nil }
98
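updateStatus above sorts the endpoints before joining them, so detecting a change reduces to a single string comparison, and the init secret's address is derived from the first endpoint. A compact sketch of that comparison, with made-up endpoint values:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	previous := "https://10.0.0.1:2379,https://10.0.0.2:2379"

	// Sorting before joining makes the comparison order-independent, so a
	// reshuffled-but-identical member list never triggers a status update.
	endpoints := []string{"https://10.0.0.2:2379", "https://10.0.0.1:2379", "https://10.0.0.3:2379"}
	sort.Strings(endpoints)
	current := strings.Join(endpoints, ",")

	if current != previous {
		fmt.Println("endpoints changed; update status and the init secret")
		// The init secret's "address" entry is the host of the first endpoint,
		// and "clientUrls" is the full comma-separated list.
		fmt.Println("clientUrls:", current)
	}
}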
etcdadm-controller
aws
Go
package controllers import ( "context" "fmt" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" ctrl "sigs.k8s.io/controller-runtime" ) const minEtcdMemberReadySeconds = 60 func (r *EtcdadmClusterReconciler) upgradeEtcdCluster(ctx context.Context, cluster *clusterv1.Cluster, ec *etcdv1.EtcdadmCluster, ep *EtcdPlane, machinesToUpgrade collections.Machines, ) (ctrl.Result, error) { /*In the absence of static DNS A records as etcd cluster endpoints, IP addresses of the etcd machines are used as etcd cluster endpoints. During cluster upgrade, etcd machines need to be upgraded first, since the controlplane machines need to know the updated etcd endpoints to pass in as etcd-servers flag value to the kube-apiserver. However, the older outdated controlplane machines will still try to connect to the older etcd members. Hence for now, scale down will not delete the machine & remove the etcd member. It will only remove the ownerRef of the EtcdadmCluster object from the Machine*/ log := r.Log if *ec.Spec.Replicas == 1 { // for single node etcd cluster, scale up first followed by a scale down if int32(ep.Machines.Len()) == *ec.Spec.Replicas { return r.scaleUpEtcdCluster(ctx, ec, cluster, ep) } // remove older etcd member's machine from being an ownedMachine return ctrl.Result{}, r.removeFromListOfOwnedMachines(ctx, ep, machinesToUpgrade) } if int32(ep.Machines.Len()) == *ec.Spec.Replicas { log.Info("Scaling down etcd cluster") return ctrl.Result{}, r.removeFromListOfOwnedMachines(ctx, ep, machinesToUpgrade) } log.Info("Scaling up etcd cluster") return r.scaleUpEtcdCluster(ctx, ec, cluster, ep) } func (r *EtcdadmClusterReconciler) removeFromListOfOwnedMachines(ctx context.Context, ep *EtcdPlane, machinesToUpgrade collections.Machines) error { machineToDelete, err := selectMachineForScaleDown(ep, machinesToUpgrade) if err != nil || machineToDelete == nil { return errors.Wrap(err, "failed to select machine for scale down") } r.Log.Info(fmt.Sprintf("Removing member %s from list of owned Etcd machines", machineToDelete.Name)) // remove the etcd cluster ownerRef so it's no longer considered a machine owned by the etcd cluster machineToDelete.OwnerReferences = []metav1.OwnerReference{} return r.Client.Update(ctx, machineToDelete) }
55
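The upgrade logic above alternates between adding a new machine and releasing an outdated one (by dropping its ownerRef), with the order reversed for single-node clusters. The function below is an illustrative reduction of that branching, not the controller's code; nextUpgradeStep and its integer inputs are hypothetical simplifications of ep.Machines.Len(), *ec.Spec.Replicas and the outdated-machine count.

package main

import "fmt"

// nextUpgradeStep reduces upgradeEtcdCluster's branching to a string result:
// multi-node clusters release an outdated member while at desired size and
// scale up otherwise; single-node clusters scale up first, then release.
func nextUpgradeStep(desiredReplicas, currentOwnedMachines, outdated int) string {
	if outdated == 0 {
		return "done"
	}
	if desiredReplicas == 1 {
		if currentOwnedMachines == desiredReplicas {
			return "scale-up"
		}
		return "release-outdated"
	}
	if currentOwnedMachines == desiredReplicas {
		return "release-outdated"
	}
	return "scale-up"
}

func main() {
	// Rolling a 3-node cluster: each outdated machine is released, then replaced.
	fmt.Println(nextUpgradeStep(3, 3, 3)) // release-outdated
	fmt.Println(nextUpgradeStep(3, 2, 2)) // scale-up
	fmt.Println(nextUpgradeStep(3, 3, 2)) // release-outdated
	fmt.Println(nextUpgradeStep(3, 3, 0)) // done

	// Single-node cluster: scale up first, then release the old member.
	fmt.Println(nextUpgradeStep(1, 1, 1)) // scale-up
	fmt.Println(nextUpgradeStep(1, 2, 1)) // release-outdated
}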
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package msk provides an MskCluster which is compatible with the [github.com/aws/go-kafka-event-source/streams.Cluster] interface. GKES is a non-proprietary library and using [MSK] is not required. This package is provided as a convenience for those who are using [MSK]. Disclaimer: github.com/aws/go-kafka-event-source/msk is not maintained or endorsed by the MSK development team. It is maintained by the developers of GKES. If you have issues with GKES->MSK connectivity, or would like new GKES->MSK features, https://github.com/aws/go-kafka-event-source is the place to ask first. [MSK]: https://aws.amazon.com/msk/ */ package msk import ( "context" "crypto/tls" "fmt" "strings" "sync" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/twmb/franz-go/pkg/kgo" kaws "github.com/twmb/franz-go/pkg/sasl/aws" "github.com/twmb/franz-go/pkg/sasl/scram" ) type MskClient interface { ListClusters(context.Context, *kafka.ListClustersInput, ...func(*kafka.Options)) (*kafka.ListClustersOutput, error) GetBootstrapBrokers(context.Context, *kafka.GetBootstrapBrokersInput, ...func(*kafka.Options)) (*kafka.GetBootstrapBrokersOutput, error) } type AuthType int const ( None AuthType = iota MutualTLS SaslScram SaslIam PublicMutualTLS PublicSaslScram PublicSaslIam ) // An implementation of [github.com/aws/go-kafka-event-source/streams.Cluster]. type MskCluster struct { clusterName string client MskClient authType AuthType tlsConfig *tls.Config awsConfig aws.Config scram scram.Auth clientOptions []kgo.Opt builtOptions []kgo.Opt mux sync.Mutex } // Returns the default AWS client config with default region of `region`. DefaultClientConfig panics on errors. func DefaultClientConfig(region string) aws.Config { cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithDefaultRegion(region)) if err != nil { panic(err) } return cfg } // Creates a new MskCluster using DefaultClientConfig. If your application is running in EC2/ECS Task or Lambda, this is likely the initializer you need. // See [Sasl IAM support] if using SaslIam/PublicSaslIam AuthType. [Look here to see how to customize client SDK options], such as a custom Retry mechanism. // Note: your application's IAM role will need access to the 'ListClusters' and 'GetBootstrapBrokers' calls for your MSK Cluster. // // [Look here to see how to customize client SDK options]: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#pkg-examples // [Sasl IAM support]: https://docs.aws.amazon.com/msk/latest/developerguide/iam-access-control.html func NewMskCluster(clusterName string, authType AuthType, region string, optFns ...func(*kafka.Options)) *MskCluster { return NewMskClusterWithClientConfig(clusterName, authType, DefaultClientConfig(region), optFns...) } // Creates a new MskCluster using the specified awsConfig.
If you are using STS for authentication, you will likely need to create your own AWS config. // If you are running on some sort of managed container like EC2/ECS Task or Lambda, you can likely use [NewMskCluster] instead. // Note: your application's IAM role will need access to the 'ListClusters' and 'GetBootstrapBrokers' calls for your MSK Cluster. func NewMskClusterWithClientConfig(clusterName string, authType AuthType, awsConfig aws.Config, optFns ...func(*kafka.Options)) *MskCluster { return &MskCluster{ clusterName: clusterName, authType: authType, awsConfig: awsConfig, client: kafka.NewFromConfig(awsConfig, optFns...), } } // Used primarily for MutualTLS authentication. If you need any configuration beyond the certificate itself, or simply switch on TLS, // you'll need to use WithClientOptions instead. See WithClientOptions for an example // // cluster := msk.NewMskCluster("MyCluster", msk.MutualTLS, "us-east-1").WithTlsConfig(myMutualTlsConfig) func (c *MskCluster) WithTlsConfig(tlsConfig *tls.Config) *MskCluster { c.tlsConfig = tlsConfig return c } // Used to supply additional kgo client options. Caution: Options supplied here will override any set by MskCluster. // This call replaces any client options previously set. Usage: // // cluster := msk.NewMskCluster("MyCluster", msk.MutualTLS, "us-east-1").WithClientOptions( // kgo.Dialer((&tls.Dialer{Config: tlsConfig, NetDialer: &net.Dialer{KeepAlive: 20 * time.Minute}}).DialContext)) func (c *MskCluster) WithClientOptions(opts ...kgo.Opt) *MskCluster { c.clientOptions = opts return c } // WithScramUserPass is used to set user/password info for SaslScram/PublicSaslScram auth types. // This package does not provide for Scram credential rotation: // // cluster := msk.NewMskCluster("MyCluster", msk.SaslScram, "us-east-1").WithScramUserPass("super", "secret") func (c *MskCluster) WithScramUserPass(user, pass string) *MskCluster { c.scram = scram.Auth{ User: user, Pass: pass, } return c } // Called by GKES when intiializing Kafka clients. The MskClluster will call ListClusters with a ClusterNameFilter (using cluster.clusterName) // to rertieve the ARN for your specific cluster. Once the arn is retrieved, GetBootstrapBrokers will be called and the appropriate // broker addresses for the specified authType will be used to seed the underlying kgo.Client func (c *MskCluster) Config() (opts []kgo.Opt, err error) { c.mux.Lock() defer c.mux.Unlock() if len(c.builtOptions) == 0 { var brokers []string brokers, err = c.getBootstrapBrokers() if err != nil { return } if len(brokers) > 0 { opts = append(opts, kgo.SeedBrokers(brokers...)) } if c.tlsConfig != nil { opts = append(opts, kgo.DialTLSConfig(c.tlsConfig)) } switch c.authType { case SaslIam, PublicSaslIam: opts = append(opts, kgo.SASL(kaws.ManagedStreamingIAM(c.saslIamAuth))) case SaslScram, PublicSaslScram: // MSK only supports SHA512 opts = append(opts, kgo.SASL(c.scram.AsSha512Mechanism())) } c.builtOptions = append(opts, c.clientOptions...) } return c.builtOptions, nil } // provides the IAM auth mechanism from using aws.Config CredentialsProvider, so as sessions expire, we should be ok. 
func (c *MskCluster) saslIamAuth(ctx context.Context) (auth kaws.Auth, err error) { var creds aws.Credentials if creds, err = c.awsConfig.Credentials.Retrieve(ctx); err == nil { auth = kaws.Auth{ AccessKey: creds.AccessKeyID, SecretKey: creds.SecretAccessKey, SessionToken: creds.SessionToken, } } return } // fetches broker urls from MSK API and returns the correct list based on AuthType func (c *MskCluster) getBootstrapBrokers() (brokers []string, err error) { var arn string var res *kafka.GetBootstrapBrokersOutput if arn, err = c.getClusterArn(); err != nil { return } if res, err = c.client.GetBootstrapBrokers(context.TODO(), &kafka.GetBootstrapBrokersInput{ ClusterArn: aws.String(arn), }); err != nil { return } var bootstrapString *string switch c.authType { case MutualTLS: bootstrapString = res.BootstrapBrokerStringTls case SaslScram: bootstrapString = res.BootstrapBrokerStringSaslScram case SaslIam: bootstrapString = res.BootstrapBrokerStringSaslIam case PublicMutualTLS: bootstrapString = res.BootstrapBrokerStringPublicTls case PublicSaslScram: bootstrapString = res.BootstrapBrokerStringPublicSaslScram case PublicSaslIam: bootstrapString = res.BootstrapBrokerStringPublicSaslIam default: bootstrapString = res.BootstrapBrokerString } if bootstrapString == nil { err = fmt.Errorf("bootstrap brokers is nil, probably due to mismatched auth type between client and cluster") return } brokers = strings.Split(*bootstrapString, ",") return } func (c *MskCluster) getClusterArn() (arn string, err error) { var res *kafka.ListClustersOutput if res, err = c.client.ListClusters(context.TODO(), &kafka.ListClustersInput{ ClusterNameFilter: aws.String(c.clusterName), }); err != nil { return } if len(res.ClusterInfoList) == 0 { err = fmt.Errorf("cluster not found: %s", c.clusterName) return } ci := res.ClusterInfoList[0] if ci.ClusterArn == nil { err = fmt.Errorf("cluster not found (nil ClusterInfo): %s", c.clusterName) return } arn = *ci.ClusterArn return }
237
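For context, a typical way to use the MskCluster above outside of GKES's own wiring might look like the sketch below: build the kgo options via Config() and hand them to a franz-go client. The cluster name, region and topic are placeholders, and the IAM role is assumed to allow kafka:ListClusters and kafka:GetBootstrapBrokers.

package main

import (
	"log"

	"github.com/aws/go-kafka-event-source/msk"
	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	// Placeholder cluster name and region.
	cluster := msk.NewMskCluster("MyCluster", msk.SaslIam, "us-east-1")

	// GKES calls Config() itself when building its clients; calling it
	// directly is only needed for a standalone kgo.Client like this one.
	opts, err := cluster.Config()
	if err != nil {
		log.Fatal(err)
	}

	client, err := kgo.NewClient(append(opts, kgo.ConsumeTopics("my-topic"))...)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}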
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package msk import ( "context" "errors" "testing" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/aws/aws-sdk-go-v2/service/kafka/types" ) type mockMskCluster struct { listOutput *kafka.ListClustersOutput brokerOutput *kafka.GetBootstrapBrokersOutput listErr error brokerErr error } func (m mockMskCluster) ListClusters(context.Context, *kafka.ListClustersInput, ...func(*kafka.Options)) (*kafka.ListClustersOutput, error) { return m.listOutput, m.listErr } func (m mockMskCluster) GetBootstrapBrokers(context.Context, *kafka.GetBootstrapBrokersInput, ...func(*kafka.Options)) (*kafka.GetBootstrapBrokersOutput, error) { return m.brokerOutput, m.brokerErr } func TestClusterReturnsErrorOnNilBootsrapBrokers(t *testing.T) { m := mockMskCluster{ listOutput: &kafka.ListClustersOutput{ ClusterInfoList: []types.ClusterInfo{ {ClusterName: aws.String("test"), ClusterArn: aws.String("arn")}, }, }, brokerOutput: &kafka.GetBootstrapBrokersOutput{}, } c := &MskCluster{ clusterName: "test", authType: SaslIam, client: m, } _, err := c.Config() if err == nil { t.Error("expected error") } } func TestClusterReturnsErrorOnMskListFailure(t *testing.T) { m := mockMskCluster{ listErr: errors.New("error"), } c := &MskCluster{ clusterName: "test", authType: SaslIam, client: m, } _, err := c.Config() if err == nil { t.Error("expected error") } } func TestClusterReturnsErrorOnMskBootstrapBrokersFailure(t *testing.T) { m := mockMskCluster{ listOutput: &kafka.ListClustersOutput{ ClusterInfoList: []types.ClusterInfo{ {ClusterName: aws.String("test"), ClusterArn: aws.String("arn")}, }, }, brokerErr: errors.New("error"), } c := &MskCluster{ clusterName: "test", authType: SaslIam, client: m, } _, err := c.Config() if err == nil { t.Error("expected error") } } func TestClusterSuccess(t *testing.T) { m := mockMskCluster{ listOutput: &kafka.ListClustersOutput{ ClusterInfoList: []types.ClusterInfo{ {ClusterName: aws.String("test"), ClusterArn: aws.String("arn")}, }, }, brokerOutput: &kafka.GetBootstrapBrokersOutput{ BootstrapBrokerStringSaslIam: aws.String("a,b,c"), }, } c := &MskCluster{ clusterName: "test", authType: SaslIam, client: m, } opts, err := c.Config() if err != nil { t.Error(err) } if len(opts) == 0 { t.Error("no options built") } if len(opts) != len(c.builtOptions) { t.Error("no options saved for reuse") } }
131
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "sync" "sync/atomic" "time" "unsafe" "github.com/aws/go-kafka-event-source/streams/sak" ) type batchItemType int const ( normal batchItemType = iota placeholder ) type BatchItem[K comparable, V any] struct { batch unsafe.Pointer itemType batchItemType key K Value V Err error UserData any } func (bi BatchItem[K, V]) Key() K { return bi.key } func batchFor[S any, K comparable, V any](ptr unsafe.Pointer) *BatchItems[S, K, V] { return (*BatchItems[S, K, V])(ptr) } type BatchItems[S any, K comparable, V any] struct { eventContext *EventContext[S] key K items []BatchItem[K, V] UserData any callback BatchCallback[S, K, V] completed int64 } // Creates a container for BatchItems and ties them to an EventContext. Once all items in BatchItems.Items() have been executed, // the provided BatchCallback will be executed. func NewBatchItems[S any, K comparable, V any](ec *EventContext[S], key K, cb BatchCallback[S, K, V]) *BatchItems[S, K, V] { return &BatchItems[S, K, V]{ eventContext: ec, key: key, callback: cb, } } func (b *BatchItems[S, K, V]) Key() K { return b.key } func (b *BatchItems[S, K, V]) Items() []BatchItem[K, V] { return b.items } func (b *BatchItems[S, K, V]) executeCallback() ExecutionState { if b.callback != nil { return b.callback(b.eventContext, b) } return Complete } func (b *BatchItems[S, K, V]) completeItem() { if atomic.AddInt64(&b.completed, 1) == int64(len(b.items)) { b.eventContext.AsyncJobComplete(b.executeCallback) } } // Adds items to BatchItems container. Values added in this method will inherit their key from the BatchItems container. func (b *BatchItems[S, K, V]) Add(values ...V) *BatchItems[S, K, V] { for _, value := range values { b.items = append(b.items, BatchItem[K, V]{ key: b.key, Value: value, itemType: normal, batch: unsafe.Pointer(b), }) } return b } // AddWithKey() is similar to Add(), but the items added do not inherit their key from the BatchItems. // Useful for interjectors that may need to batch items that belong to multiple keys. 
func (b *BatchItems[S, K, V]) AddWithKey(key K, values ...V) *BatchItems[S, K, V] { for _, value := range values { b.items = append(b.items, BatchItem[K, V]{ key: key, Value: value, itemType: normal, batch: unsafe.Pointer(b), }) } return b } type asyncBatchState int const ( batcherReady asyncBatchState = iota batcherExecuting ) type asyncBatchExecutor[S any, K comparable, V any] struct { items []*BatchItem[K, V] noops []*BatchItems[S, K, V] state asyncBatchState flushTimer *time.Timer } func (b *asyncBatchExecutor[S, K, V]) add(item *BatchItem[K, V]) { if item.itemType == placeholder { b.noops = append(b.noops, (*BatchItems[S, K, V])(item.batch)) } else { b.items = append(b.items, item) } } func (b *asyncBatchExecutor[S, K, V]) reset(assignments map[K]*asyncBatchExecutor[S, K, V]) { for i, item := range b.items { delete(assignments, item.key) b.items[i] = nil } for i := range b.noops { b.noops[i] = nil } b.items = b.items[0:0] b.noops = b.noops[0:0] b.state = batcherReady } /* AsyncBatcher performs a similar function to the [AsyncJobScheduler], but is intended for performing actions for multiple events at a time. This is particularly useful when interacting with systems which provide a batch API. For detailed examples, see https://github.com/aws/go-kafka-event-source/docs/asynprocessing.md */ type AsyncBatcher[S any, K comparable, V any] struct { executors []*asyncBatchExecutor[S, K, V] assignments map[K]*asyncBatchExecutor[S, K, V] pendingItems *sak.List[*BatchItem[K, V]] executor BatchExecutor[K, V] executingCount int maxBatchSize int batchDelay time.Duration mux sync.Mutex } // Create a new AsynBatcher. Each invocation of `executor` will have a maximum of `maxBatchSize` items. // No more than `maxConcurrentBatches` will be executing at any given time. AsynBatcher will accumulate items until `delay` has elapsed, // or `maxBatchSize` items have been received. func NewAsyncBatcher[S StateStore, K comparable, V any](executor BatchExecutor[K, V], maxBatchSize, maxConcurrentBatches int, delay time.Duration) *AsyncBatcher[S, K, V] { executors := make([]*asyncBatchExecutor[S, K, V], maxConcurrentBatches) for i := range executors { executors[i] = &asyncBatchExecutor[S, K, V]{ items: make([]*BatchItem[K, V], 0, maxBatchSize), } } if delay == 0 { delay = time.Millisecond * 5 } return &AsyncBatcher[S, K, V]{ executor: executor, assignments: make(map[K]*asyncBatchExecutor[S, K, V]), pendingItems: sak.NewList[*BatchItem[K, V]](), executors: executors, maxBatchSize: maxBatchSize, batchDelay: sak.Abs(delay), } } // Schedules items in BatchItems to be executed when capoacity is available. func (ab *AsyncBatcher[S, K, V]) Add(batch *BatchItems[S, K, V]) ExecutionState { if len(batch.items) == 0 { /* since all events for a given key *must* travel through the async processor we need to add a placeholder in the cases where there are no items to process if we bypass the async scheduler in these situations, we could end up processing out of order if there are other async processes after this one. Additionally, if this is the last processor for a given event, we may deadlock because the original event may not be marked as complete if we add a batch with no items. Add a placeholder to ensure this doesn't happen. 
*/ ab.add(&BatchItem[K, V]{batch: unsafe.Pointer(batch), key: batch.key, itemType: placeholder}) } for i := range batch.items { ab.add( // ensure we don't escape to the heap (*BatchItem[K, V])(sak.Noescape(unsafe.Pointer(&batch.items[i]))), ) } return Incomplete } func (ab *AsyncBatcher[S, K, V]) add(bi *BatchItem[K, V]) { ab.mux.Lock() if asyncBatch := ab.asyncExecutorFor(bi); asyncBatch != nil { ab.addToExecutor(bi, asyncBatch) } else { ab.pendingItems.PushBack(bi) } ab.mux.Unlock() } func (ab *AsyncBatcher[S, K, V]) asyncExecutorFor(item *BatchItem[K, V]) *asyncBatchExecutor[S, K, V] { if batch, ok := ab.assignments[item.key]; ok && batch.state == batcherReady { return batch } else if ok { // this key is currently in an executing batch, so we have to wait for it to finish return nil } for _, batch := range ab.executors { if batch.state == batcherReady { return batch } } return nil } func (ab *AsyncBatcher[S, K, V]) addToExecutor(item *BatchItem[K, V], executor *asyncBatchExecutor[S, K, V]) { ab.assignments[item.key] = executor executor.add(item) if len(executor.items)+len(executor.noops) == ab.maxBatchSize { ab.conditionallyExecuteBatch(executor) } else if executor.flushTimer == nil { executor.flushTimer = time.AfterFunc(ab.batchDelay, func() { // we have a race condition where we could have reached max items ab.mux.Lock() ab.conditionallyExecuteBatch(executor) ab.mux.Unlock() }) } } func (ab *AsyncBatcher[S, K, V]) conditionallyExecuteBatch(executor *asyncBatchExecutor[S, K, V]) { if executor.state == batcherReady { executor.state = batcherExecuting ab.executingCount++ if executor.flushTimer != nil { executor.flushTimer.Stop() executor.flushTimer = nil } go ab.executeBatch(executor) } } func (ab *AsyncBatcher[S, K, V]) completeBatchItems(items []*BatchItem[K, V]) { for _, item := range items { batchFor[S, K, V](item.batch).completeItem() } } func (ab *AsyncBatcher[S, K, V]) executeBatch(executor *asyncBatchExecutor[S, K, V]) { if len(executor.items) > 0 { ab.executor(executor.items) ab.completeBatchItems(executor.items) } for _, b := range executor.noops { b.eventContext.AsyncJobComplete(b.executeCallback) } ab.mux.Lock() ab.executingCount-- executor.reset(ab.assignments) ab.flushPendingItems() ab.mux.Unlock() } func (ab *AsyncBatcher[S, K, V]) flushPendingItems() { if ab.executingCount == len(ab.executors) { // there are no available batches, no need to continue in this loop return } for el := ab.pendingItems.Front(); el != nil; { if executor := ab.asyncExecutorFor(el.Value); executor != nil { ab.addToExecutor(el.Value, executor) tmp := el.Next() ab.pendingItems.Remove(el) el = tmp if ab.executingCount == len(ab.executors) { // there are no available batches, no need to continue in this loop return } } else { el = el.Next() } } }
312
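The AsyncBatcher above flushes a batch when it reaches maxBatchSize or when its flush timer fires, whichever happens first. The loop below is a deliberately simplified, stdlib-only illustration of that size-or-delay flush pattern; it ignores keys, executor pooling and the EventContext plumbing that the real type provides.

package main

import (
	"fmt"
	"time"
)

// batcher accumulates items and flushes either when the batch is full or
// when the delay elapses after the first item of a batch arrived.
func batcher(in <-chan int, maxBatchSize int, delay time.Duration, flush func([]int)) {
	batch := make([]int, 0, maxBatchSize)
	timer := time.NewTimer(delay)
	timer.Stop()
	for {
		select {
		case item, ok := <-in:
			if !ok {
				if len(batch) > 0 {
					flush(batch)
				}
				return
			}
			if len(batch) == 0 {
				timer.Reset(delay) // first item arms the flush timer
			}
			batch = append(batch, item)
			if len(batch) == maxBatchSize {
				timer.Stop()
				flush(batch)
				batch = batch[:0]
			}
		case <-timer.C:
			if len(batch) > 0 {
				flush(batch) // delay elapsed before the batch filled up
				batch = batch[:0]
			}
		}
	}
}

func main() {
	in := make(chan int)
	done := make(chan struct{})
	go func() {
		batcher(in, 10, 5*time.Millisecond, func(b []int) { fmt.Println("flushing", len(b), "items") })
		close(done)
	}()
	for i := 0; i < 25; i++ {
		in <- i
	}
	close(in)
	<-done
}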
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "sync/atomic" "testing" "time" ) func TestAsyncBatching(t *testing.T) { var items [20]int64 for i := range items { items[i] = int64(i) } done := make(chan struct{}) ec := MockEventContext[intStore](context.TODO(), nil, "", NewIntStore(ntp(0, "")), mockAsyncCompleter{ expectedState: Incomplete, done: done, t: t, }, nil) batch := NewBatchItems(ec, 0, func(_ *EventContext[intStore], b *BatchItems[intStore, int, int64]) ExecutionState { if len(b.items) != 20 { t.Errorf("incorrect number of items. actual: %d, expected: %d", len(b.items), 20) } return Incomplete }, ).Add(items[:]...) executionCount := int64(0) lastProcessed := int64(-1) executor := func(batch []*BatchItem[int, int64]) { if atomic.AddInt64(&executionCount, 1) == 1 { time.Sleep(100 * time.Millisecond) } if len(batch) != 10 { t.Errorf("incorrect batch size. actual %d, exepected %d", len(batch), 10) } for _, batchItem := range batch { value := batchItem.Value oldValue := atomic.SwapInt64(&lastProcessed, value) batchItem.UserData = -value if value-1 != oldValue { t.Errorf("incorrect ordering of async batcher. actual %d, exepected %d", value, oldValue+1) } } } batcher := NewAsyncBatcher[intStore](executor, 10, 10, 0) batcher.Add(batch) timer := time.NewTimer(time.Second) defer timer.Stop() select { case <-done: case <-timer.C: t.Errorf("execution timed out") } if executionCount != 2 { t.Errorf("incorrect execution count. actual %d, expected: %d", executionCount, 2) } for _, item := range batch.items { userData := item.UserData.(int64) if item.Value+userData != 0 { t.Errorf("invalid userdata: %v, %v", userData, item.Value) } } } func TestAsyncNoopBatching(t *testing.T) { done := make(chan struct{}) ec := MockEventContext[intStore](context.TODO(), nil, "", NewIntStore(ntp(0, "")), mockAsyncCompleter{ expectedState: Complete, done: done, t: t, }, nil) batch := NewBatchItems(ec, 0, func(_ *EventContext[intStore], b *BatchItems[intStore, int, int64]) ExecutionState { if len(b.items) != 0 { t.Errorf("incorrect number of items. actual: %d, expected: %d", len(b.items), 0) } return Complete }, ) executor := func(batch []*BatchItem[int, int64]) { t.Errorf("executor should not have been executed") } batcher := NewAsyncBatcher[intStore](executor, 10, 10, 0) batcher.Add(batch) timer := time.NewTimer(time.Second) defer timer.Stop() select { case <-done: case <-timer.C: t.Errorf("execution timed out") } }
116
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "sync" ) const minWorkItemQueueSize = 2 /* This is a thread safe fifo queue implementation, implemented via a peakable buffer. It will actually report a size of maxSize + 1 items when there is a penging put. We're adding this to eliminate the FifoQueueFullError condition which currently results in a thread.Sleep(). Instead, we'll provide a blocking fixed size buffer to provide backpressure. We're going to use a channel as this will decrease the number of allocations and type conversions that are required for linked list. Queue operations make no memory allocations. # Obviously, in a multi-sender context, the separate go-routines will be racing for order, which may be OK depending on use case `done()` reports true if head and channel are empty and there are no pending writes to the channel see async_scheduler.go notes as to why we have a tryEnqueue and resumeEnqueue call */ type asyncItemQueue[T any] struct { size int32 queue chan T headLock sync.Mutex head T // we don't know if T is a pointer or a struct, so we may not be able to just return nil // use emptyItem to return frome peek()/dequeue() when queue is empty emptyItem T peekable bool } func newAsyncItemQueue[T any](maxSize int) *asyncItemQueue[T] { if maxSize < minWorkItemQueueSize { maxSize = minWorkItemQueueSize } return &asyncItemQueue[T]{ size: 0, peekable: false, queue: make(chan T, maxSize-1), } } func (aiq *asyncItemQueue[T]) backfill() bool { select { case aiq.head = <-aiq.queue: aiq.peekable = true default: aiq.head = aiq.emptyItem aiq.peekable = false } return aiq.peekable } func (aiq *asyncItemQueue[T]) enqueueChannel() chan T { return aiq.queue } func (aiq *asyncItemQueue[T]) tryEnqueue(item T) bool { aiq.headLock.Lock() defer aiq.headLock.Unlock() aiq.size++ if !aiq.peekable && !aiq.backfill() { aiq.head = item aiq.peekable = true return true } select { case aiq.queue <- item: return true default: return false } } func (aiq *asyncItemQueue[T]) dequeue() (T, bool) { aiq.headLock.Lock() defer aiq.headLock.Unlock() if !aiq.peekable && !aiq.backfill() { return aiq.emptyItem, false } aiq.size-- item, res := aiq.head, aiq.peekable aiq.backfill() return item, res } func (aiq *asyncItemQueue[T]) peek() (T, bool) { aiq.headLock.Lock() defer aiq.headLock.Unlock() return aiq.head, aiq.peekable } func (aiq *asyncItemQueue[T]) done() bool { aiq.headLock.Lock() defer aiq.headLock.Unlock() return aiq.size <= 0 }
117
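The queue above replaces a queue-full-error-and-sleep approach with a bounded channel: a non-blocking tryEnqueue that fails fast, and a blocking send on the same channel when the caller is allowed to wait. The snippet below sketches just that backpressure idea with a plain buffered channel; the peekable head and size accounting of asyncItemQueue are left out.

package main

import "fmt"

// tryEnqueue mirrors the non-blocking path: it fails fast when the
// bounded channel is full instead of sleeping and retrying.
func tryEnqueue(queue chan int, item int) bool {
	select {
	case queue <- item:
		return true
	default:
		return false
	}
}

func main() {
	queue := make(chan int, 2) // bounded buffer provides the backpressure

	for i := 0; i < 4; i++ {
		if tryEnqueue(queue, i) {
			fmt.Println("enqueued", i)
		} else {
			// The real scheduler falls back to a blocking send on the same
			// channel once it is safe for the caller to wait.
			fmt.Println("queue full, would block for", i)
		}
	}

	close(queue)
	for item := range queue {
		fmt.Println("dequeued", item)
	}
}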
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "errors" "runtime" "sync" "sync/atomic" "github.com/aws/go-kafka-event-source/streams/sak" ) type asyncJobContainer[S any, K comparable, V any] struct { eventContext *EventContext[S] finalizer AsyncJobFinalizer[S, K, V] key K value V err error } func (ajc asyncJobContainer[S, K, V]) invokeFinalizer() ExecutionState { return ajc.finalizer(ajc.eventContext, ajc.key, ajc.value, ajc.err) } type worker[S any, K comparable, V any] struct { capacity int workQueue *asyncItemQueue[asyncJobContainer[S, K, V]] processor AsyncJobProcessor[K, V] depth int64 ctx context.Context key K no_key K } func (w *worker[S, K, V]) reset() { w.key = w.no_key } func (w *worker[S, K, V]) tryAddItem(item asyncJobContainer[S, K, V]) bool { if w.workQueue.tryEnqueue(item) { atomic.AddInt64(&w.depth, 1) return true } return false } func (w *worker[S, K, V]) blockingAddItem(item asyncJobContainer[S, K, V]) { select { case w.workQueue.enqueueChannel() <- item: atomic.AddInt64(&w.depth, 1) case <-w.ctx.Done(): } } func (w *worker[S, K, V]) dequeue() { w.workQueue.dequeue() atomic.AddInt64(&w.depth, -1) } func (w *worker[S, K, V]) advance() { w.dequeue() } func (w *worker[S, K, V]) process() { if w.ctx.Err() != nil { // worker is cancelled return } item, ok := w.workQueue.peek() if !ok { return } item.err = w.processor(item.key, item.value) w.advance() item.eventContext.AsyncJobComplete(item.invokeFinalizer) } /* The AsyncJobScheduler provides a generic work scheduler/job serializer which takes a key/value as input via Schedule. All work is organized into queues by 'key'. So for a given key, all work is serial allowing the use of the single writer principle in an asynchronous fashion. In practice, it divides a stream partition into it's individual keys and processes the keys in parallel. After the the scheduling is complete for a key/value, Scheduler will call the `processor` callback defined at initialization. The output of this call will be passed to the `finalizer` callback. If `finalizer` is nil, the event is marked as `Complete`, once the job is finished, ignoring any errors. 
For detailed examples, see https://github.com/aws/go-kafka-event-source/docs/asynprocessing.md */ type AsyncJobScheduler[S StateStore, K comparable, V any] struct { runStatus sak.RunStatus processor AsyncJobProcessor[K, V] finalizer AsyncJobFinalizer[S, K, V] workerFreeSignal chan struct{} workerMap map[K]*worker[S, K, V] workerChannel chan *worker[S, K, V] // functions as a blocking queue workerQueueDepth int64 maxConcurrentKeys int mux sync.Mutex updateRWLock sync.RWMutex workerPool sync.Pool } type SchedulerConfig struct { Concurrency, WorkerQueueDepth, MaxConcurrentKeys int } /* it does not make an sense to have less concurrent keys than max number of processors */ func (c SchedulerConfig) concurrentKeys() int { if c.Concurrency > c.MaxConcurrentKeys { return c.Concurrency } return c.MaxConcurrentKeys } var DefaultConfig = SchedulerConfig{ Concurrency: runtime.NumCPU(), WorkerQueueDepth: 1000, MaxConcurrentKeys: 10000, } var ComputeConfig = SchedulerConfig{ Concurrency: runtime.NumCPU(), WorkerQueueDepth: 1000, MaxConcurrentKeys: 10000, } var FastNetworkConfig = SchedulerConfig{ Concurrency: runtime.NumCPU() * 4, WorkerQueueDepth: 100, MaxConcurrentKeys: 10000, } var SlowNetworkConfig = SchedulerConfig{ Concurrency: runtime.NumCPU() * 16, WorkerQueueDepth: 100, MaxConcurrentKeys: 10000, } var WideNetworkConfig = SchedulerConfig{ Concurrency: runtime.NumCPU() * 32, WorkerQueueDepth: 1000, MaxConcurrentKeys: 10000, } // Creates an AsyncJobScheduler which is tied to the RunStatus of EventSource. func CreateAsyncJobScheduler[S StateStore, K comparable, V any]( eventSource *EventSource[S], processor AsyncJobProcessor[K, V], finalizer AsyncJobFinalizer[S, K, V], config SchedulerConfig) (*AsyncJobScheduler[S, K, V], error) { return NewAsyncJobScheduler(eventSource.ForkRunStatus(), processor, finalizer, config) } // Creates an AsyncJobScheduler which will continue to run while runStatus.Running() func NewAsyncJobScheduler[S StateStore, K comparable, V any]( runStatus sak.RunStatus, processor AsyncJobProcessor[K, V], finalizer AsyncJobFinalizer[S, K, V], config SchedulerConfig) (*AsyncJobScheduler[S, K, V], error) { if config.WorkerQueueDepth < 0 { return nil, errors.New("workerQueueDepth must be >= 0") } if config.Concurrency < 1 { return nil, errors.New("concurrency must be > 0") } if finalizer == nil { finalizer = func(ec *EventContext[S], k K, v V, err error) ExecutionState { return Complete } } maxConcurrentKeys := config.concurrentKeys() ap := &AsyncJobScheduler[S, K, V]{ runStatus: runStatus, processor: processor, finalizer: finalizer, workerQueueDepth: int64(config.WorkerQueueDepth), workerFreeSignal: make(chan struct{}, 1), workerMap: make(map[K]*worker[S, K, V], maxConcurrentKeys), workerChannel: make(chan *worker[S, K, V], maxConcurrentKeys+1), maxConcurrentKeys: maxConcurrentKeys, } ap.workerPool = sync.Pool{ New: func() interface{} { return ap.newQueue() }, } ap.warmup() for i := 0; i < config.Concurrency; i++ { go ap.work() } return ap, nil } func (ap *AsyncJobScheduler[S, K, V]) isClosed() bool { return !ap.runStatus.Running() } func (ap *AsyncJobScheduler[S, K, V]) queueDepth() int64 { return atomic.LoadInt64(&ap.workerQueueDepth) } func (ap *AsyncJobScheduler[S, K, V]) newQueue() interface{} { qd := int(ap.queueDepth()) return &worker[S, K, V]{ capacity: qd, workQueue: newAsyncItemQueue[asyncJobContainer[S, K, V]](qd), processor: ap.processor, ctx: ap.runStatus.Ctx(), } } // Schedules the value for processing in order by key. 
The finalizer will be invoked once processing is complete. func (ap *AsyncJobScheduler[S, K, V]) Schedule(ec *EventContext[S], key K, value V) ExecutionState { if ap.isClosed() { return Complete } ap.scheduleItem(asyncJobContainer[S, K, V]{ eventContext: ec, finalizer: ap.finalizer, key: key, value: value, err: nil, }) return Incomplete } func (ap *AsyncJobScheduler[S, K, V]) scheduleItem(item asyncJobContainer[S, K, V]) { var w *worker[S, K, V] = nil var created bool added := false for w == nil { ap.mux.Lock() w, created = ap.grabWorker(item.key) if w == nil { ap.mux.Unlock() // wait until a worker thread finshes processing and try again ap.waitForWorker() if ap.isClosed() { return } } } /* we're in a bit of a pickle here. We need to record that we're adding an item to this key before we unlock the list of keys, otherwise we may end up orphaning a key's queue (adding an item while it's being released) but a blocking operation here will cause a deadlock so tell the worker to stay alive until an item has been added */ added = w.tryAddItem(item) ap.mux.Unlock() if !added { w.blockingAddItem(item) } else if created { ap.enqueueWorker(w) } } func (ap *AsyncJobScheduler[S, K, V]) work() { for { select { case wq := <-ap.workerChannel: if wq != nil { wq.process() ap.releaseWorker(wq) } case <-ap.runStatus.Done(): // there may be routines publishing to or receiving from // ap.workerFreeSignal. If we close it, those that are publishing will cause a panic // so try to publish to close out any receivers. // if we can't, it means the channel is already full and we've done our job and any receivers will close select { case ap.workerFreeSignal <- struct{}{}: default: } return } } } func (ap *AsyncJobScheduler[S, K, V]) SetWorkerQueueDepth(size int) { atomic.StoreInt64(&ap.workerQueueDepth, int64(size)) } /* Dynamically update the MaxConcurrentKeys for the current scheduler. */ func (ap *AsyncJobScheduler[S, K, V]) SetMaxConcurrentKeys(size int) { // prevent any action on workerChannel until this operation is complete ap.updateRWLock.Lock() defer ap.updateRWLock.Unlock() // this does not actually increase the number of workers, just makes room in the pending worker channel prevMaxKeys := ap.maxConcurrentKeys // lock here as grabWorker() uses this value, locked in release/scheduleWorker ap.mux.Lock() ap.maxConcurrentKeys = SchedulerConfig{MaxConcurrentKeys: size}.concurrentKeys() ap.mux.Unlock() ap.ensureWorkerChannelCapacity(size, prevMaxKeys) } func (ap *AsyncJobScheduler[S, K, V]) ensureWorkerChannelCapacity(newSize, oldSize int) { if newSize > oldSize { /* we need to make sure s.workerChannel capacity is > s.maxConcurrentKeys to avoid a deadlock. worker routines pull from this channel and may post back in the same thread if there is more work for a given key. 
In short, this channel needs to be able to fold at least s.maxConcurrentKeys at any given time */ wc := make(chan *worker[S, K, V], newSize+1) // transfer any pending workers to the new channel oldChan := ap.workerChannel pending: for i := 0; i < newSize; i++ { select { case w := <-oldChan: wc <- w default: break pending } } ap.workerChannel = wc close(oldChan) } } func (ap *AsyncJobScheduler[S, K, V]) waitForWorker() { <-ap.workerFreeSignal } func (ap *AsyncJobScheduler[S, K, V]) workerAvailable() { if ap.isClosed() { return } select { case ap.workerFreeSignal <- struct{}{}: default: } } func (ap *AsyncJobScheduler[S, K, V]) enqueueWorker(w *worker[S, K, V]) { // if ap.isClosed() { // return // } ap.updateRWLock.RLock() ap.workerChannel <- w ap.updateRWLock.RUnlock() } func (ap *AsyncJobScheduler[S, K, V]) warmup() { for i := 0; i < ap.maxConcurrentKeys; i++ { w := ap.workerPool.Get().(*worker[S, K, V]) w.reset() ap.workerPool.Put(w) } } func (ap *AsyncJobScheduler[S, K, V]) releaseWorker(w *worker[S, K, V]) { // if ap.isClosed() { // return // } ap.mux.Lock() if w.workQueue.done() { delete(ap.workerMap, w.key) w.reset() ap.workerPool.Put(w) ap.mux.Unlock() ap.workerAvailable() return } ap.mux.Unlock() ap.enqueueWorker(w) } func (ap *AsyncJobScheduler[S, K, V]) grabWorker(key K) (*worker[S, K, V], bool) { var w *worker[S, K, V] var ok bool if w, ok = ap.workerMap[key]; !ok { if len(ap.workerMap) >= ap.maxConcurrentKeys { return nil, false } for w = ap.workerPool.Get().(*worker[S, K, V]); w.capacity != int(ap.queueDepth()); { // we've updated the workerQueueDepth, exhaust the pool until we create a new one w = ap.workerPool.Get().(*worker[S, K, V]) } w.key = key ap.workerMap[key] = w } return w, !ok }
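To make the Schedule/processor/finalizer flow above concrete, here is a minimal sketch of wiring an AsyncJobScheduler into an EventSource handler. It leans only on signatures visible in this package (CreateAsyncJobScheduler, Schedule, RegisterEventType, JsonItemDecoder, the Complete/Incomplete states); the userEvent type, the "userEvent" record type name and the enrich call are hypothetical stand-ins, not part of the library.

package example

import "github.com/aws/go-kafka-event-source/streams"

// userEvent is a hypothetical payload type used only for this sketch.
type userEvent struct {
	UserId  string
	Payload string
}

// enrich stands in for a slow external call (HTTP, database, etc.).
func enrich(userId string, ev userEvent) error { return nil }

// registerAsyncEnrichment splits each partition into per-UserId sub-streams:
// events for the same UserId are processed serially, distinct UserIds in parallel.
func registerAsyncEnrichment[S streams.StateStore](es *streams.EventSource[S]) error {
	scheduler, err := streams.CreateAsyncJobScheduler(es,
		// processor: runs off the partition goroutine, in order per key
		func(userId string, ev userEvent) error { return enrich(userId, ev) },
		// finalizer: invoked once the processor returns, completing the event
		func(ec *streams.EventContext[S], userId string, ev userEvent, err error) streams.ExecutionState {
			return streams.Complete
		},
		streams.FastNetworkConfig)
	if err != nil {
		return err
	}
	// Schedule returns Incomplete; the finalizer above marks the event Complete.
	streams.RegisterEventType(es, streams.JsonItemDecoder[userEvent],
		func(ec *streams.EventContext[S], ev userEvent) streams.ExecutionState {
			return scheduler.Schedule(ec, ev.UserId, ev)
		}, "userEvent")
	return nil
}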
415
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "sync" "testing" "time" "github.com/aws/go-kafka-event-source/streams/sak" ) func TestAsyncJobSchedulerOrdering(t *testing.T) { runStatus := sak.NewRunStatus(context.Background()) defer runStatus.Halt() done := make(chan struct{}, 100) ordered := make(map[int]int) mapLock := &sync.Mutex{} scheduler, err := NewAsyncJobScheduler(runStatus, func(int, int) error { time.Sleep(time.Millisecond) return nil }, func(ec *EventContext[intStore], key int, value int, err error) ExecutionState { mapLock.Lock() defer mapLock.Unlock() var lastValue int var ok bool if lastValue, ok = ordered[key]; ok { if value <= lastValue { t.Errorf("out of sequence events for key: %d, lastValue: %d, newValue: %d", key, lastValue, value) } } ordered[key] = lastValue return Complete }, WideNetworkConfig) if err != nil { t.Error(err) t.FailNow() } timer := time.NewTimer(defaultTestTimeout) defer timer.Stop() executionCount := 0 testSize := 100000 store := NewIntStore(TopicPartition{}) go func() { for i := 0; i < testSize; i++ { event := MockEventContext[intStore](runStatus.Ctx(), NewRecord(), "", store, mockAsyncCompleter{ done: done, expectedState: Complete, t: t, }, nil) key := i % 1000 value := 1 scheduler.Schedule(event, key, value) } }() for { select { case <-done: executionCount++ if executionCount == testSize { return } case <-timer.C: t.Errorf("execution timed out") return } } } // We need to avoid deadlocks between the async process and the event source. // An async processor should never accept more items than eosProducerPool.maxPendingItems(), // otherwise we will deadlock as the async process signals the partionWorker that processing is complete // (partionWorker may be blocked on eosProducerPool.addEventContext). // This test ensures we do not make any code changes that break this rule. 
// // In the current implementation, this is enforced by the partitionWorker.maxPending channel, but we're adding the test // as part of the async testing, as this problem only exists because of async processing func TestAsyncJobScheduler_CapacityGreaterThanEOSProducer(t *testing.T) { if testing.Short() { t.Skip() return } itemCount := 100000 // must be greater than the default EOS Producer capacity of 30k processed := 0 done := make(chan struct{}, 1000) es, p, _ := newTestEventSource() p.produceMany(t, "int", itemCount) scheduler, _ := NewAsyncJobScheduler(es.ForkRunStatus(), func(key, value int) error { time.Sleep(time.Millisecond) return nil }, func(ec *EventContext[intStore], key, value int, err error) ExecutionState { done <- struct{}{} return Complete }, SchedulerConfig{ Concurrency: 100, MaxConcurrentKeys: itemCount, WorkerQueueDepth: 10, }) RegisterEventType(es, decodeIntStoreItem, func(ec *EventContext[intStore], event intStoreItem) ExecutionState { return scheduler.Schedule(ec, event.Key, event.Value) }, "int") es.ConsumeEvents() defer es.StopNow() timer := time.NewTimer(time.Minute) for { select { case <-done: processed++ if processed == itemCount { return } case <-timer.C: t.Error("async processing deadlock") t.FailNow() return } } }
139
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import "github.com/twmb/franz-go/pkg/kgo" type BalanceStrategy int func toGroupBalancers(instructionHandler IncrRebalanceInstructionHandler, rs []BalanceStrategy) []kgo.GroupBalancer { balancers := []kgo.GroupBalancer{} for _, balancer := range rs { switch balancer { case RangeBalanceStrategy: balancers = append(balancers, kgo.RangeBalancer()) case RoundRobinBalanceStrategy: balancers = append(balancers, kgo.RoundRobinBalancer()) case CooperativeStickyBalanceStrategy: balancers = append(balancers, kgo.CooperativeStickyBalancer()) case IncrementalBalanceStrategy: balancers = append(balancers, IncrementalRebalancer(instructionHandler)) } } return balancers } const ( RangeBalanceStrategy BalanceStrategy = 0 RoundRobinBalanceStrategy BalanceStrategy = 1 CooperativeStickyBalanceStrategy BalanceStrategy = 2 IncrementalBalanceStrategy BalanceStrategy = 3 ) var DefaultBalanceStrategies = []BalanceStrategy{IncrementalBalanceStrategy}
46
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import "testing" func BenchmarkXxx(b *testing.B) { b.ResetTimer() }
22
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "bytes" "encoding/binary" "fmt" "math" "unsafe" "github.com/aws/go-kafka-event-source/streams/sak" jsoniter "github.com/json-iterator/go" ) type Codec[T any] interface { Encode(*bytes.Buffer, T) error Decode([]byte) (T, error) } var defaultJson = jsoniter.ConfigCompatibleWithStandardLibrary // A convenience function for decoding an IncomingRecord. // Conforms to streams.IncomingRecordDecoder interface needed for streams.RegisterEventType // // streams.RegisterEventType(myEventSource, codec.JsonItemDecoder[myType], myHandler, "myType") // // or standalone // myDecoder := codec.JsonItemDecoder[myType] // myItem := myDecoder(incomingRecord) func JsonItemDecoder[T any](record IncomingRecord) (T, error) { var codec JsonCodec[T] return codec.Decode(record.Value()) } // A convenience function for encoding an item into a Record suitable for sending to a producer // Please not that the Key on the record will be left uninitialized. Usage: // // record := codec.JsonItemEncoder("myType", myItem) // record.WriteKeyString(myItem.Key) func JsonItemEncoder[T any](recordType string, item T) *Record { var codec JsonCodec[T] record := NewRecord().WithRecordType(recordType) codec.Encode(record.ValueWriter(), item) return record } // A convenience function for encoding an item into a ChangeLogEntry suitable writing to a StateStore // Please not that the Key on the entry will be left uninitialized. Usage: // // entry := codec.EncodeJsonChangeLogEntryValue("myType", myItem) // entry.WriteKeyString(myItem.Key) func EncodeJsonChangeLogEntryValue[T any](entryType string, item T) ChangeLogEntry { var codec JsonCodec[T] cle := NewChangeLogEntry().WithEntryType(entryType) codec.Encode(cle.ValueWriter(), item) return cle } type intCodec[T sak.Signed] struct{} func (intCodec[T]) Encode(b *bytes.Buffer, i T) error { writeSignedIntToByteArray(i, b) return nil } func (intCodec[T]) Decode(b []byte) (T, error) { return readIntegerFromByteArray[T](b), nil } // Convenience codec for working with int types. // Will never induce an error unless there is an OOM condition, so they are safe to ignore on Encode/Decode var IntCodec = intCodec[int]{} // Convenience codec for working with int64 types // Will never induce an error unless there is an OOM condition, so they are safe to ignore on Encode/Decode var Int64Codec = intCodec[int64]{} // Convenience codec for working with int32 types // Will never induce an error unless there is an OOM condition, so they are safe to ignore on Encode/Decode var Int32Codec = intCodec[int32]{} // A convenience Codec for integers where the encoded value is suitable for sorting in data structure which use // []byte as keys (such as an LSM based db like BadgerDB or RocksDB). Useful if you need to persist items in order by timestamp // or some other integer value. // Decode will generate an error if the input []byte size is not [LexInt64Size]. 
var LexoInt64Codec = lexoInt64Codec{} type lexoInt64Codec struct{} const LexInt64Size = int(unsafe.Sizeof(uint64(1))) + 1 // Encodes the provided value. Will never induce an error unless there is an OOM condition, so it should be safe to ignore. func (lexoInt64Codec) Encode(buf *bytes.Buffer, i int64) error { var b [LexInt64Size]byte if i > 0 { b[0] = 1 binary.LittleEndian.PutUint64(b[1:], uint64(i)) } else { binary.LittleEndian.PutUint64(b[1:], uint64(math.MaxInt64+i)) } buf.Write(b[:]) return nil } // Decodes the provided []byte. If len([]byte) is not equal to [LexInt64Size], an error will be generated. func (lexoInt64Codec) Decode(b []byte) (int64, error) { if len(b) != LexInt64Size { return 0, fmt.Errorf("invalid lexo integer []byte length. Expected %d, actual: %d", LexInt64Size, len(b)) } sign := b[0] val := int64(binary.LittleEndian.Uint64(b[1:])) if sign == 1 { return val, nil } return val - math.MaxInt64, nil } // A generic JSON en/decoder. // Uses "github.com/json-iterator/go".ConfigCompatibleWithStandardLibrary for en/decoding JSON in a performant way type JsonCodec[T any] struct{} // Encodes the provided value. func (JsonCodec[T]) Encode(b *bytes.Buffer, t T) error { stream := defaultJson.BorrowStream(b) defer defaultJson.ReturnStream(stream) stream.WriteVal(t) return stream.Flush() } // Decodes the provided []byte, func (JsonCodec[T]) Decode(b []byte) (T, error) { iter := defaultJson.BorrowIterator(b) defer defaultJson.ReturnIterator(iter) var t T iter.ReadVal(&t) return t, iter.Error } type stringCodec struct{} // Encodes the provide value. Will never induce an error unless there is an OOM condition, so it should be safe to ignore. func (stringCodec) Encode(b *bytes.Buffer, s string) error { _, err := b.WriteString(s) return err } // Decodes the provide value. Will never induce an error so it is safe to ignore. func (stringCodec) Decode(b []byte) (string, error) { return string(b), nil } // Convenience codec for working with strings. var StringCodec Codec[string] = stringCodec{} type byteCodec struct{} // Encodes the provide value. Will never induce an error unless there is an OOM condition, so it should be safe to ignore on Encode/Decode func (byteCodec) Encode(b *bytes.Buffer, v []byte) error { _, err := b.Write(v) return err } // Decodes the provide value. Will never induce an error so it is safe to ignore. func (byteCodec) Decode(b []byte) ([]byte, error) { return b, nil } // Convenience codec for working with raw `[]byte`s var ByteCodec Codec[[]byte] = byteCodec{}
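A short sketch of how the codecs above are typically combined. The order type is hypothetical; everything else (JsonCodec, JsonItemEncoder, LexoInt64Codec, Record.WriteKeyString) is taken from the declarations and doc comments in this file.

package example

import (
	"bytes"
	"fmt"

	"github.com/aws/go-kafka-event-source/streams"
)

// order is a hypothetical payload type used only for this sketch.
type order struct {
	Id     string
	Amount int64
}

func codecExamples() {
	// JSON round trip via the generic JsonCodec.
	var codec streams.JsonCodec[order]
	buf := &bytes.Buffer{}
	_ = codec.Encode(buf, order{Id: "o-1", Amount: 42})
	decoded, _ := codec.Decode(buf.Bytes())
	fmt.Println(decoded.Id)

	// JsonItemEncoder builds a Record ready for EventContext.Forward;
	// the key is left for the caller to set.
	record := streams.JsonItemEncoder("order", order{Id: "o-2", Amount: 7})
	record.WriteKeyString("o-2")

	// LexoInt64Codec is documented above as producing []byte keys whose byte
	// order tracks numeric order (useful for LSM-style stores).
	a, b := &bytes.Buffer{}, &bytes.Buffer{}
	streams.LexoInt64Codec.Encode(a, -5)
	streams.LexoInt64Codec.Encode(b, 10)
	fmt.Println(bytes.Compare(a.Bytes(), b.Bytes()) < 0) // true: -5 sorts before 10
}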
183
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "bytes" "testing" ) func TestLexoIntCodec(t *testing.T) { a := bytes.NewBuffer(nil) b := bytes.NewBuffer(nil) c := bytes.NewBuffer(nil) d := bytes.NewBuffer(nil) LexoInt64Codec.Encode(a, -2) LexoInt64Codec.Encode(b, 1) LexoInt64Codec.Encode(c, 10) LexoInt64Codec.Encode(d, -4) if bytes.Compare(a.Bytes(), b.Bytes()) >= 0 { t.Errorf("invalid lexo compare %d, %d", -2, 1) } if bytes.Compare(b.Bytes(), c.Bytes()) >= 0 { t.Errorf("invalid lexo compare %d, %d", 1, 10) } if bytes.Compare(d.Bytes(), a.Bytes()) >= 0 { t.Errorf("invalid lexo compare %d, %d", -4, -2) } } func TestLexoIntCodecDecode(t *testing.T) { a := bytes.NewBuffer(nil) b := bytes.NewBuffer(nil) c := bytes.NewBuffer(nil) d := bytes.NewBuffer(nil) LexoInt64Codec.Encode(a, -2) LexoInt64Codec.Encode(b, 1) LexoInt64Codec.Encode(c, 10) LexoInt64Codec.Encode(d, -4) if v, _ := LexoInt64Codec.Decode(a.Bytes()); v != -2 { t.Errorf("invalid lexo decode. actual: %d, expected: %d", v, -2) } if v, _ := LexoInt64Codec.Decode(b.Bytes()); v != 1 { t.Errorf("invalid lexo decode. actual: %d, expected: %d", v, 1) } if v, _ := LexoInt64Codec.Decode(c.Bytes()); v != 10 { t.Errorf("invalid lexo decode. actual: %d, expected: %d", v, 10) } if v, _ := LexoInt64Codec.Decode(d.Bytes()); v != -4 { t.Errorf("invalid lexo decode. actual: %d, expected: %d", v, -4) } }
71
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "bytes" "sync" "unsafe" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/google/uuid" ) type eosCommitLog struct { pendingSyncs map[string]*sync.WaitGroup watermarks map[int32]int64 mux sync.Mutex syncMux sync.Mutex numPartitions int32 topic string changeLog GlobalChangeLog[*eosCommitLog] } const intByteSize = int(unsafe.Sizeof(uintptr(1))) func writeTopicPartitionToBytes(tp TopicPartition, b *bytes.Buffer) { var arr [intByteSize]byte *(*int64)(unsafe.Pointer(&arr[0])) = int64(tp.Partition) b.Write(arr[:]) b.WriteString(tp.Topic) } func writeSignedIntToByteArray[T sak.Signed](i T, b *bytes.Buffer) { var arr [intByteSize]byte *(*int64)(unsafe.Pointer(&arr[0])) = int64(i) b.Write(arr[:]) } func readIntegerFromByteArray[T sak.Signed](b []byte) T { return T(*(*int64)(unsafe.Pointer(&b[0]))) } func topicPartitionFromBytes(b []byte) (tp TopicPartition) { tp.Partition = readIntegerFromByteArray[int32](b) tp.Topic = string(b[intByteSize:]) return } func newEosCommitLog(runStatus sak.RunStatus, source *Source, numPartitions int) *eosCommitLog { cl := &eosCommitLog{ watermarks: make(map[int32]int64), pendingSyncs: make(map[string]*sync.WaitGroup), numPartitions: int32(numPartitions), topic: source.CommitLogTopicNameForGroupId(), } cl.changeLog = NewGlobalChangeLogWithRunStatus(runStatus, source.stateCluster(), cl, numPartitions, cl.topic, CompactCleanupPolicy) return cl } func (cl *eosCommitLog) commitRecordPartition(tp TopicPartition) int32 { return tp.Partition % cl.numPartitions } func (cl *eosCommitLog) commitRecord(tp TopicPartition, offset int64) *Record { record := NewRecord().WithTopic(cl.topic).WithPartition(cl.commitRecordPartition(tp)) writeTopicPartitionToBytes(tp, record.KeyWriter()) // increment so we start consuming at the next offset writeSignedIntToByteArray(offset+1, record.ValueWriter()) return record } func (cl *eosCommitLog) ReceiveChange(record IncomingRecord) error { if record.isMarkerRecord() { cl.closeSyncRequest(string(record.Value())) } else { tp := topicPartitionFromBytes(record.Key()) offset := readIntegerFromByteArray[int64](record.Value()) cl.mux.Lock() cl.watermarks[tp.Partition] = offset cl.mux.Unlock() } return nil } func (cl *eosCommitLog) Revoked() {} func (cl *eosCommitLog) closeSyncRequest(mark string) { cl.syncMux.Lock() if wg, ok := cl.pendingSyncs[mark]; ok { delete(cl.pendingSyncs, mark) wg.Done() } cl.syncMux.Unlock() } func (cl *eosCommitLog) syncAll() { wg := &sync.WaitGroup{} for i := int32(0); i < cl.numPartitions; i++ { tp := ntp(i, cl.topic) wg.Add(1) go func() { cl.syncCommitLogPartition(tp) wg.Done() }() } wg.Wait() } func (cl *eosCommitLog) lastProcessed(tp TopicPartition) int64 { cl.syncCommitLogPartition(ntp(cl.commitRecordPartition(tp), cl.topic)) return cl.Watermark(tp) } func (cl *eosCommitLog) syncCommitLogPartition(tp TopicPartition) { cl.syncMux.Lock() mark := uuid.NewString() 
markWaiter := &sync.WaitGroup{} markWaiter.Add(1) cl.pendingSyncs[mark] = markWaiter cl.syncMux.Unlock() sendMarkerMessage(cl.changeLog.client, tp, []byte(mark)) markWaiter.Wait() } func (cl *eosCommitLog) Watermark(tp TopicPartition) int64 { cl.mux.Lock() defer cl.mux.Unlock() if offset, ok := cl.watermarks[tp.Partition]; ok { return offset } return -1 } // func (cl *eosCommitLog) Stop() { // cl.changeLog.Stop() // cl.changeLog.client.Close() // } func (cl *eosCommitLog) Start() { cl.changeLog.Start() }
154
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* GKES (Go Kafka Event Source) attempts to fill the gaps in the Go/Kafka library ecosystem. It supplies Exactly Once Semantics (EOS), local state stores and incremental consumer rebalancing to Go Kafka consumers, making it a viable alternative to a traditional Kafka Streams application written in Java. # What it is GKES is a Go/Kafka library tailored towards the development of [Event Sourcing applications], by providing a high-throughput, low-latency Kafka client framework. Using Kafka transactions, it provides for EOS, data integrity and high availability. If you wish to use GKES as a straight Kafka consumer, it will fit the bill as well, though there are plenty of libraries for that, and researching which best fits your use case is time well spent. GKES is not an all-in-one, do-everything black box. Some elements, in particular the StateStore, have been left without comprehensive implementations. # StateStores A useful and performant local state store rarely has a flat data structure. If your state store does, there are some convenient implementations provided. However, to achieve optimum performance, you will not only need to write a StateStore implementation, but will also need to understand what the proper data structures are for your use case (trees, heaps, maps, disk-based LSM trees or combinations thereof). You can use the provided [github.com/aws/go-kafka-event-source/streams/stores.SimpleStore] as a starting point. # Vending State GKES purposefully does not provide a pre-canned way for exposing StateStore data, other than producing to another Kafka topic. There are as many ways to vend data as there are web applications. Rather than putting effort into inventing yet another one, GKES provides the mechanisms to query StateStores via Interjections. This mechanism can be plugged into whatever request/response mechanism suits your use-case (gRPC, RESTful HTTP service...any number of web frameworks already in the Go ecosystem). [TODO: provide a simple http example] # Interjections For those familiar with the Kafka Streams API, GKES provides for stream `Punctuators`, but we call them `Interjections` (because it sounds cool). Interjections allow you to insert actions into your EventSource at a specified interval per assigned partition via [streams.EventSource.ScheduleInterjection], or at any time via [streams.EventSource.Interject]. This is useful for bookkeeping activities, aggregated metric production or even error handling. Interjections have full access to the StateStore associated with an EventSource and can interact with output topics like any other EventProcessor. # Incremental Consumer Rebalancing One issue that Kafka consumer applications have long suffered from is latency spikes during a consumer rebalance. The cooperative sticky rebalancing introduced by Kafka and implemented by [kgo] helps resolve this issue.
However, once StateStores are thrown into the mix, things get a bit more complicated because initializing the StateStore on a host involves consuming a compacted TopicPartition from start to end. GKES solves this with the [IncrementalRebalancer] and takes it one step further. The [IncrementalRebalancer] rebalances consumer partitions in a controlled fashion, minimizing latency spikes and limiting the blast radius of a bad deployment. # Async Processing GKES provides conventions for asynchronously processing events on the same Kafka partition while still maintaining data/stream integrity. The [AsyncBatcher] and [AsyncJobScheduler] allow you to split a TopicPartition into sub-streams by key, ensuring all events for a particular key are processed in order, allowing for parallel processing on a given TopicPartition. For more details, see [Async Processing Examples] # High-Throughput/Low-Latency EOS A Kafka transaction is a powerful tool which allows for Exactly Once Semantics (EOS) by linking a consumer offset commit to one or more records that are being produced by your application (a StateStore record for example). The history of Kafka EOS is a long and complicated one with varied degrees of performance and efficiency. Early iterations required one producer transaction per consumer partition, which was very inefficient, as a Topic with 1000 partitions would also require 1000 clients in order to provide EOS. This has since been addressed, but depending on client implementations, there is a high risk of running into "producer fenced" errors as well as reduced throughput. In a traditional Java Kafka Streams application, transactions are committed according to the auto-commit frequency, which defaults to 100ms. This means that your application will only produce readable records every 100ms per partition. The effect of this is that no matter what you do, your tail latency will be at least 100ms and downstream consumers will receive records in bursts rather than a steady stream. For many use cases, this is unacceptable. GKES solves this issue by using a configurable transactional producer pool and a type of "Nagle's algorithm". Uncommitted offsets are added to the transaction pool in sequence. Once a producer has reached its record limit, or enough time has elapsed (10ms by default), the head transaction will wait for any incomplete events to finish, then flush and commit. While this transaction is committing, GKES continues to process events and optimistically begins a new transaction and produces records on the next producer in the pool. Since transactions produce in sequence, there is no danger of commit offset overlap or duplicate message processing in the case of a failure. To ensure EOS, your [EventSource] must use either the [IncrementalRebalancer], or [kgo]'s cooperative sticky implementation. Though if you're using a StateStore, the [IncrementalRebalancer] should be used to avoid lengthy periods of inactivity during application deployments. # Kafka Client Library Rather than create yet another Kafka driver, GKES is built on top of [kgo]. This Kafka client was chosen as it (in our testing) has superior throughput and latency profiles compared to other client libraries currently available to Go developers. One other key advantage is that it provides a migration path to cooperative consumer rebalancing, required for our EOS implementation. Other Go Kafka libraries provide cooperative rebalancing, but do not allow you to migrate from a non-cooperative rebalancing strategy (range, sticky etc.).
This is a major roadblock for existing deployments, as the only migration paths are an entirely new consumer group, or bringing your application completely down and re-deploying with a new rebalance strategy. These migration plans, to put it mildly, are a big challenge for zero-downtime/live applications. The [kgo] package now makes this migration possible with zero downtime. Kgo also has the proper hooks needed to implement the [IncrementalGroupRebalancer], which is necessary for safe deployments when using a local state store. Kudos to [kgo]! [Event Sourcing applications]: https://martinfowler.com/eaaDev/EventSourcing.html [kgo]: https://pkg.go.dev/github.com/twmb/franz-go/pkg/kgo [Async Processing Examples]: https://github.com/aws/go-kafka-event-source/blame/main/docs/asyncprocessing.md */ package streams
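To tie the concepts above together, here is a hedged sketch of the lifecycle calls on an already-constructed EventSource: typed event registration, a scheduled interjection, consumption and shutdown. The pageView type and the handler/interjector bodies are placeholders, and construction of the EventSource itself (EventSourceConfig, StateStoreFactory) is deliberately omitted because its configuration surface is not shown in this section.

package example

import (
	"os"
	"time"

	"github.com/aws/go-kafka-event-source/streams"
)

// pageView is a hypothetical event type used only for this sketch.
type pageView struct {
	Page string
}

// run shows the documented lifecycle calls for an EventSource built elsewhere.
// S is whatever StateStore the source was constructed with.
func run[S streams.StateStore](es *streams.EventSource[S]) {
	// Route records whose type header is "pageView" to a typed handler.
	streams.RegisterEventType(es, streams.JsonItemDecoder[pageView],
		func(ec *streams.EventContext[S], pv pageView) streams.ExecutionState {
			return streams.Complete
		}, "pageView")

	// Periodic, per-partition bookkeeping: roughly every 1s +/- 100ms.
	es.ScheduleInterjection(func(ec *streams.EventContext[S], when time.Time) streams.ExecutionState {
		return streams.Complete
	}, time.Second, 100*time.Millisecond)

	es.ConsumeEvents() // non-blocking; starts the underlying consumer
	es.WaitForSignals(func(s os.Signal) bool { return true })
}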
108
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "fmt" "sync" "sync/atomic" "time" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/google/uuid" "github.com/twmb/franz-go/pkg/kgo" ) const ( noPendingDuration = time.Minute ) type partitionOwners[T any] struct { owners map[int32]*producerNode[T] mux *sync.Mutex } func (po partitionOwners[T]) owned(p int32, pn *producerNode[T]) bool { po.mux.Lock() defer po.mux.Unlock() return po.owners[p] == pn } func (po partitionOwners[T]) set(p int32, pn *producerNode[T]) { po.mux.Lock() po.owners[p] = pn po.mux.Unlock() } func (po partitionOwners[T]) conditionallyUpdate(p int32, pn *producerNode[T]) { po.mux.Lock() if _, ok := po.owners[p]; !ok { po.owners[p] = pn } po.mux.Unlock() } func (po partitionOwners[T]) clear(p int32) { po.mux.Lock() delete(po.owners, p) po.mux.Unlock() } // a container that allows to know who produced the records // needed for txn error conditions while there are revoked partitions // allows us to filter recordsToProduce on a retry and exclude partitions that have been revoked type pendingRecord struct { record *Record partition int32 cb func(*Record, error) } var pendingRecordPool = sak.NewPool(10, func() []pendingRecord { return make([]pendingRecord, 0) }, func(a []pendingRecord) []pendingRecord { for i := range a { a[i].record = nil } return a[0:0] }) type eosProducerPool[T StateStore] struct { producerNodeQueue chan *producerNode[T] onDeck *producerNode[T] commitQueue chan *producerNode[T] buffer chan *EventContext[T] cfg EosConfig source *Source producerNodes []*producerNode[T] flushTimer *time.Ticker partitionOwners partitionOwners[T] startTime time.Time errorChannel chan error commitClient *kgo.Client } func newEOSProducerPool[T StateStore](source *Source, commitLog *eosCommitLog, cfg EosConfig, commitClient *kgo.Client, metrics chan Metric) *eosProducerPool[T] { pp := &eosProducerPool[T]{ cfg: cfg, producerNodeQueue: make(chan *producerNode[T], cfg.PoolSize), commitQueue: make(chan *producerNode[T], cfg.PendingTxnCount), buffer: make(chan *EventContext[T], 1024), producerNodes: make([]*producerNode[T], 0, cfg.PoolSize), errorChannel: make(chan error, 1024), //giving this some size so we don't block on errors in other go routines source: source, commitClient: commitClient, partitionOwners: partitionOwners[T]{ owners: make(map[int32]*producerNode[T]), mux: new(sync.Mutex), }, flushTimer: time.NewTicker(noPendingDuration), } var first *producerNode[T] var prev *producerNode[T] var last *producerNode[T] for i := 0; i < cfg.PoolSize; i++ { p := newProducerNode(i, source, commitLog, pp.partitionOwners, commitClient, metrics, pp.errorChannel) pp.producerNodes = append(pp.producerNodes, p) if first == nil { first = p } else { prev.next = p } last = p prev = p pp.producerNodeQueue <- p } last.next = first pp.onDeck = <-pp.producerNodeQueue go pp.forwardExecutionContexts() go pp.commitLoop() return pp } // buffer 
the event context until a producer node is available func (pp *eosProducerPool[T]) addEventContext(ec *EventContext[T]) { pp.buffer <- ec } func (pp *eosProducerPool[T]) maxPendingItems() int { return pp.cfg.MaxBatchSize * pp.cfg.PoolSize } func (pp *eosProducerPool[T]) doForwardExecutionContexts(ec *EventContext[T]) { if ec.isRevoked() { // if we're revoked, don't even add this to the onDeck producer ec.producerChan <- nil ec.revocationWaiter.Done() return } txnStarted := pp.onDeck.addEventContext(ec) pp.partitionOwners.conditionallyUpdate(ec.partition(), pp.onDeck) ec.producerChan <- pp.onDeck // off to the races if pp.shouldTryFlush() { pp.tryFlush() } else if txnStarted { pp.flushTimer.Reset(pp.cfg.BatchDelay) } } func (pp *eosProducerPool[T]) forwardExecutionContexts() { for { select { case ec := <-pp.buffer: pp.doForwardExecutionContexts(ec) case <-pp.flushTimer.C: pp.tryFlush() } } } func (pp *eosProducerPool[T]) shouldTryFlush() bool { return sak.Max(pp.onDeck.eventContextCnt, atomic.LoadInt64(&pp.onDeck.produceCnt)) >= int64(pp.cfg.TargetBatchSize) } func (pp *eosProducerPool[T]) shouldForceFlush() bool { return sak.Max(pp.onDeck.eventContextCnt, atomic.LoadInt64(&pp.onDeck.produceCnt)) == int64(pp.cfg.MaxBatchSize) } func (pp *eosProducerPool[T]) tryFlush() { if pp.shouldForceFlush() { // the onDeck producer is full // force a swap, blocking until successful pp.commitQueue <- pp.onDeck pp.onDeck = <-pp.producerNodeQueue } else if pp.onDeck.eventContextCnt > 0 { // the committing channel is full, reset the purge timer // to push any lingering items. select { // try to swap so we can commit as fast as possible case pp.commitQueue <- pp.onDeck: pp.onDeck = <-pp.producerNodeQueue default: // we have pending items, try again in 5ms // if new items come in during this interval, this timer may get reset // and the flush proces will begin again pp.flushTimer.Reset(pp.cfg.BatchDelay) } } else { // we don't have pending items, no reason to burn CPU, set the timer to an hour pp.flushTimer.Reset(noPendingDuration) } } func (pp *eosProducerPool[T]) commitLoop() { for { var err error select { case p := <-pp.commitQueue: err = pp.commit(p) case err = <-pp.errorChannel: } if err != nil { if instructions := pp.source.eosErrorHandler()(err); instructions != Continue { pp.source.fail(err) switch instructions { case FailConsumer: return case FatallyExit: panic(err) } } } } } func (pp *eosProducerPool[T]) commit(p *producerNode[T]) error { if pp.startTime.IsZero() { pp.startTime = time.Now() } err := p.commit() if err != nil { log.Errorf("txn commit error: %v", err) return err } pp.producerNodeQueue <- p return err } type eventContextDll[T any] struct { root, tail *EventContext[T] } type producerNode[T any] struct { client *kgo.Client commitClient *kgo.Client txnContext context.Context txnContextCancel func() commitLog *eosCommitLog next *producerNode[T] metrics chan Metric source *Source recordsToProduce []pendingRecord produceCnt int64 byteCount int64 eventContextCnt int64 currentPartitions map[int32]eventContextDll[T] partitionOwners partitionOwners[T] shouldMarkCommit bool commitWaiter sync.Mutex // this is a mutex masquerading as a WaitGroup partitionLock sync.RWMutex produceLock sync.Mutex firstEvent time.Time id int txnErrorHandler TxnErrorHandler errorChannel chan error // errs []error } func newProducerNode[T StateStore](id int, source *Source, commitLog *eosCommitLog, partitionOwners partitionOwners[T], commitClient *kgo.Client, metrics chan Metric, errorChannel chan error) *producerNode[T] 
{ client := sak.Must(NewClient( source.stateCluster(), kgo.RecordPartitioner(NewOptionalPartitioner(kgo.StickyKeyPartitioner(nil))), kgo.TransactionalID(uuid.NewString()), kgo.TransactionTimeout(30*time.Second), )) return &producerNode[T]{ client: client, metrics: metrics, commitClient: commitClient, source: source, errorChannel: errorChannel, id: id, commitLog: commitLog, shouldMarkCommit: source.shouldMarkCommit(), currentPartitions: make(map[int32]eventContextDll[T]), partitionOwners: partitionOwners, txnErrorHandler: source.eosErrorHandler(), recordsToProduce: pendingRecordPool.Borrow(), } } func (p *producerNode[T]) addEventContext(ec *EventContext[T]) bool { p.commitWaiter.Lock() defer p.commitWaiter.Unlock() startTxn := false p.partitionLock.Lock() if p.eventContextCnt == 0 { p.firstEvent = time.Now() startTxn = true } p.eventContextCnt++ partition := ec.partition() // we'll use a linked list in reverse order, since we want the larget offset anyway if dll, ok := p.currentPartitions[partition]; ok { ec.prev = dll.tail dll.tail.next = ec dll.tail = ec // we're not using a ptr, so be sure to set the value p.currentPartitions[partition] = dll } else { ec.revocationWaiter.Add(1) p.currentPartitions[partition] = eventContextDll[T]{ root: ec, tail: ec, } } p.partitionLock.Unlock() return startTxn } func (p *producerNode[T]) beginTransaction() { if err := p.client.BeginTransaction(); err != nil { log.Errorf("could not begin txn err: %v", err) select { case p.errorChannel <- err: default: } } } func (p *producerNode[T]) finalizeEventContexts(first, last *EventContext[T]) error { commitRecordProduced := false // we'll now iterate in reverse order - committing the largest offset for ec := last; ec != nil; ec = ec.prev { select { case <-ec.done: case <-p.txnContext.Done(): return fmt.Errorf("txn timeout exceeded. 
waiting for event context to finish: %+v", ec) } offset := ec.Offset() // if less than 0, this is an interjection, no record to commit if !commitRecordProduced && offset >= 0 { // we only want to produce the highest offset, since these are in reverse order // produce a commit record for the first real offset we see commitRecordProduced = true crd := p.commitLog.commitRecord(ec.TopicPartition(), offset) p.ProduceRecord(ec, crd, nil) } ec.revocationWaiter.Done() } return nil } func (p *producerNode[T]) commit() error { commitStart := time.Now() p.commitWaiter.Lock() defer p.commitWaiter.Unlock() p.produceLock.Lock() p.flushRemaining() for tp := range p.currentPartitions { p.partitionOwners.set(tp, p) } p.produceLock.Unlock() for _, dll := range p.currentPartitions { if err := p.finalizeEventContexts(dll.root, dll.tail); err != nil { log.Errorf("eos finalization error: %v", err) return err } } err := p.client.Flush(p.txnContext) if err != nil { log.Errorf("eos producer error: %v", err) return err } action := kgo.TryCommit if p.produceCnt == 0 { action = kgo.TryAbort } err = p.client.EndTransaction(p.txnContext, action) if err != nil { log.Errorf("eos producer txn error: %v", err) return err } p.clearState(commitStart) return nil } func (p *producerNode[T]) clearState(executionTime time.Time) { p.txnContextCancel() p.txnContext = nil for _, ecs := range p.currentPartitions { ecs.root.revocationWaiter.Done() if p.shouldMarkCommit { for ec := ecs.tail; ec != nil; ec = ec.prev { if !ec.IsInterjection() { p.commitClient.MarkCommitRecords(&ec.input.kRecord) } } } } partitionCount := len(p.currentPartitions) p.relinquishOwnership() if p.metrics != nil && p.produceCnt > 0 { p.metrics <- Metric{ Operation: TxnCommitOperation, Topic: p.source.Topic(), GroupId: p.source.GroupId(), StartTime: p.firstEvent, ExecuteTime: executionTime, EndTime: time.Now(), Count: int(p.produceCnt), Bytes: int(p.byteCount), PartitionCount: partitionCount, Partition: -1, } } p.produceCnt = 0 p.eventContextCnt = 0 p.byteCount = 0 pendingRecordPool.Release(p.recordsToProduce) p.recordsToProduce = pendingRecordPool.Borrow() } /* relinquishOwnership is needed to maintain produce ordering. If we have more than 2 producerNodes, we can not simply relinquich control of partitions as the onDeck producerNode may take ownership while the pending produceNode may have events for the same partition. In this case, the ondeck producerNode would get priority over the pending, which would break event ordering. Since we round robin through producerNodes, we can iterate through them in order and check to see if the producerNode has any events for the partition in question. If so, transfer ownership immediately which will open up production for pending events. While this does create lock contention, it allows us to produce concurrently accross nodes. It also allows us to start producing records *before* we begin the commit process. 
*/ func (p *producerNode[T]) relinquishOwnership() { for nextNode := p.next; nextNode != p; nextNode = nextNode.next { nextNode.partitionLock.RLock() } for tp := range p.currentPartitions { wasTransfered := false for nextNode := p.next; nextNode != p; nextNode = nextNode.next { nextNode.produceLock.Lock() if _, ok := nextNode.currentPartitions[tp]; ok { nextNode.flushPartition(tp) p.partitionOwners.set(tp, nextNode) nextNode.produceLock.Unlock() wasTransfered = true break } nextNode.produceLock.Unlock() } if !wasTransfered { p.partitionOwners.clear(tp) } delete(p.currentPartitions, tp) } for nextNode := p.next; nextNode != p; nextNode = nextNode.next { nextNode.partitionLock.RUnlock() } } func (p *producerNode[T]) flushPartition(partition int32) { newPends := pendingRecordPool.Borrow() for _, pending := range p.recordsToProduce { if pending.partition != partition { newPends = append(newPends, pending) continue } p.produceKafkaRecord(pending.record, pending.cb) } pendingRecordPool.Release(p.recordsToProduce) p.recordsToProduce = newPends } func (p *producerNode[T]) flushRemaining() { if p.txnContext == nil { // this is the first record produced, let's start our context with timeout now // for now, setting it to txn timeout - 1 second p.txnContext, p.txnContextCancel = context.WithTimeout(context.Background(), 29*time.Second) p.beginTransaction() } for _, pending := range p.recordsToProduce { p.produceKafkaRecord(pending.record, pending.cb) } } func (p *producerNode[T]) ProduceRecord(ec *EventContext[T], record *Record, cb func(*Record, error)) { p.produceLock.Lock() p.produceCnt++ // set the timestamp if not set // we want to capture any time that this record spends in the recordsToProduce buffer if record.kRecord.Timestamp.IsZero() { record.kRecord.Timestamp = time.Now() } if p.partitionOwners.owned(ec.partition(), p) { p.produceKafkaRecord(record, cb) } else { p.recordsToProduce = append(p.recordsToProduce, pendingRecord{ record: record, partition: ec.partition(), cb: cb, }) } p.produceLock.Unlock() } func (p *producerNode[T]) produceKafkaRecord(record *Record, cb func(*Record, error)) { if p.txnContext == nil { // this is the first record produced, let's start our context with timeout now // for now, setting it to txn timeout - 1 second p.txnContext, p.txnContextCancel = context.WithTimeout(context.Background(), 29*time.Second) p.beginTransaction() } p.client.Produce(p.txnContext, record.toKafkaRecord(), func(r *kgo.Record, err error) { record.kRecord = *r atomic.AddInt64(&p.byteCount, int64(recordSize(*r))) if err != nil { log.Errorf("%v, record %+v", err, r) select { case p.errorChannel <- err: default: } } if cb != nil { cb(record, err) } record.Release() }) }
537
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams // ErrorResponse instructs GKES on how to proceed when an error is encountered. type ErrorResponse int const ( // Instructs GKES to ignore any error state and continue processing as normal. If this is used in response to a // Kafka transaction error, there will likely be data loss or corruption. This ErrorResponse is not recommended as it is unlikely that // a consumer will be able to recover gracefully from a transaction error. In almost all situations, FailConsumer is preferred. Continue ErrorResponse = iota // Instructs GKES to immediately stop processing and the consumer to immediately leave the group. // This is preferable to FatallyExit as Kafka will immediately recognize the consumer as exiting the group // (if there is still communication with the cluster) and processing of the // failed partitions will begin without waiting for the session timeout value. FailConsumer // As the name implies, the application will fatally exit. The partitions owned by this consumer will not be reassigned until the configured // session timeout on the broker. FatallyExit ) type ErrorContext interface { TopicPartition() TopicPartition Offset() int64 Input() (IncomingRecord, bool) } // The default DeserializationErrorHandler. Simply logs the error and returns [Continue]. func DefaultDeserializationErrorHandler(ec ErrorContext, eventType string, err error) ErrorResponse { log.Errorf("failed to deserialize record for %+v, offset: %d, eventType: %s,error: %v", ec.TopicPartition(), ec.Offset(), eventType, err) return Continue } // The default and recommended TxnErrorHandler. Returns [FailConsumer] on txn errors. func DefaultTxnErrorHandler(err error) ErrorResponse { log.Errorf("failing consumer due to eos txn error: %v", err) return FailConsumer }
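A brief sketch of custom handlers matching the default signatures above. How they are attached to a Source or EventSource (presumably via its configuration) is not shown in this file, so registration is omitted; errPoisonPill is a hypothetical sentinel error used only for illustration.

package example

import (
	"errors"

	"github.com/aws/go-kafka-event-source/streams"
)

// errPoisonPill is a hypothetical sentinel used only for this sketch.
var errPoisonPill = errors.New("poison pill")

// A deserialization handler that skips known poison-pill records but fails the
// consumer for anything unexpected; compare with DefaultDeserializationErrorHandler,
// which always returns Continue.
func myDeserializationErrorHandler(ec streams.ErrorContext, eventType string, err error) streams.ErrorResponse {
	if errors.Is(err, errPoisonPill) {
		return streams.Continue
	}
	return streams.FailConsumer
}

// A transaction error handler that prefers a hard exit so an external orchestrator
// restarts the process; DefaultTxnErrorHandler returns FailConsumer instead.
func myTxnErrorHandler(err error) streams.ErrorResponse {
	return streams.FatallyExit
}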
54
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "sync" "unsafe" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/twmb/franz-go/pkg/kgo" ) type AsyncCompleter[T any] interface { AsyncComplete(AsyncJob[T]) } type EventContextProducer[T any] interface { ProduceRecord(*EventContext[T], *Record, func(*Record, error)) } // Contains information about the current event. Is passed to EventProcessors and Interjections type EventContext[T any] struct { // we're going to keep a reference to the partition worker context // so we can skip over any buffered events in the EOSProducer ctx context.Context producerChan chan EventContextProducer[T] producer EventContextProducer[T] revocationWaiter *sync.WaitGroup next *EventContext[T] prev *EventContext[T] input IncomingRecord asyncCompleter AsyncCompleter[T] changeLog changeLogData[T] done chan struct{} topicPartition TopicPartition interjection *interjection[T] } // A convenience function for creating unit tests for an EventContext from an incoming Kafka Record. All arguments other than `ctx` // are optional unless you are interacting with those resources. For example, if you call EventContext.Forward/RecordChange, you will need to provide a mock producer. // If you run the EventContext through an async process, you will need to provide a mock AsyncCompleter. // // func TestMyHandler(t *testing.T) { // eventContext := streams.MockEventContext(context.TODO(), mockRecord(), "storeTopic", mockStore(), mockCompleter(), mockProducer()) // if err := testMyHandler(eventContext, eventContext.Input()); { // t.Error(err) // } // } func MockEventContext[T any](ctx context.Context, input *Record, stateStoreTopc string, store T, asyncCompleter AsyncCompleter[T], producer EventContextProducer[T]) *EventContext[T] { ec := &EventContext[T]{ ctx: ctx, changeLog: changeLogData[T]{ topic: stateStoreTopc, store: store, }, asyncCompleter: asyncCompleter, producer: producer, } if input != nil { ec.input = input.AsIncomingRecord() ec.topicPartition = input.TopicPartition() } return ec } // A convenience function for creating unit tests for an EventContext from an interjection. All arguments other than `ctx` // are optional unless you are interacting with those resources. For example, if you call EventContext.Forward/RecordChange, you will need to provide a mock producer. // If you run the EventContext through an async process, you will need to provide a mock AsyncCompleter. 
// // func TestMyInterjector(t *testing.T) { // eventContext := streams.MockInterjectionEventContext(context.TODO(), myTopicPartition, "storeTopic", mockStore(), mockCompleter(), mockProducer()) // if err := testMyInterjector(eventContext, time.Now()); { // t.Error(err) // } // } func MockInterjectionEventContext[T any](ctx context.Context, topicPartition TopicPartition, stateStoreTopc string, store T, asyncCompleter AsyncCompleter[T], producer EventContextProducer[T]) *EventContext[T] { ec := &EventContext[T]{ topicPartition: topicPartition, changeLog: changeLogData[T]{ topic: stateStoreTopc, store: store, }, asyncCompleter: asyncCompleter, producer: producer, interjection: &interjection[T]{}, } return ec } func (ec *EventContext[T]) isRevoked() bool { return ec.ctx.Err() != nil } // Returns true if this EventContext represents an Interjection func (ec *EventContext[T]) IsInterjection() bool { return ec.interjection != nil } // The offset for this event, -1 for an Interjection func (ec *EventContext[T]) Offset() int64 { if ec.IsInterjection() { return -1 } return ec.input.Offset() } // The TopicParition for this event. It is present for both normal events and Interjections func (ec *EventContext[T]) TopicPartition() TopicPartition { return ec.topicPartition } // The parition for this event. It is present for both normal events and Interjections func (ec *EventContext[T]) partition() int32 { return ec.topicPartition.Partition } // Forwards produces records on the transactional producer for your EventSource. // If the transaction fails, records produced in this fashion will not be visible to other consumers who have a fetch isolation of `read_commited`. // An isolation level of `read_commited“ is required for Exactly Once Semantics // // It is important to note that GKES uses a Record pool. After the transaction has completed for this record, it is returned to the pool for reuse. // Your application should not hold on to references to the Record(s) after Forward has been invoked. func (ec *EventContext[T]) Forward(records ...*Record) { for _, record := range records { ec.producer.ProduceRecord(ec, record, nil) } } // Forwards records to the transactional producer for your EventSource. When you add an item to your StateStore, // you must call this method for that change to be recorded in the stream. This ensures that when the TopicPartition for this change // is tansferred to a new consumer, it will also have this change. // If the transaction fails, records produced in this fashion will not be visible to other consumers who have a fetch isolation of `read_commited`. // An isolation level of `read_commited“ is required for Exactly Once Semantics // // It is important to note that GKES uses a Record pool. After the transaction has completed for this record, it is returned to the pool for reuse. // Your application should not hold on to references to the ChangeLogEntry(s) after RecordChange has been invoked. func (ec *EventContext[T]) RecordChange(entries ...ChangeLogEntry) { for _, entry := range entries { if len(ec.changeLog.topic) > 0 { log.Tracef("RecordChange changeLogTopic: %s, topicPartition: %+v, value: %v", ec.changeLog.topic, ec.topicPartition, entry) record := entry.record. WithTopic(ec.changeLog.topic). WithPartition(ec.topicPartition.Partition) ec.producer.ProduceRecord(ec, record, nil) } else { log.Warnf("EventContext.RecordChange was called but consumer is not stateful") } } } // AsyncJobComplete should be called when an async event processor has performed it's function. 
// the finalize function should return Complete if there are no other pending asynchronous jobs for the event context in question, // regardless of error state. `finalize` does not accept any arguments, so your callback should encapsulate // any pertinent data needed for processing. If you are using [AsyncBatcher], [AsyncJobScheduler] or [BatchProducer], you should not need to interact with this method directly. func (ec *EventContext[T]) AsyncJobComplete(finalize func() ExecutionState) { ec.asyncCompleter.AsyncComplete(AsyncJob[T]{ ctx: ec, finalizer: finalize, }) } // Returns the raw input record for this event, or an uninitialized record and false if the EventContext represents an Interjection func (ec *EventContext[T]) Input() (IncomingRecord, bool) { return ec.input, !ec.IsInterjection() } // Returns the StateStore for this event/TopicPartition func (ec *EventContext[T]) Store() T { return ec.changeLog.store } func (ec *EventContext[T]) complete() { close(ec.done) } func newEventContext[T StateStore](ctx context.Context, record *kgo.Record, changeLog changeLogData[T], pw *partitionWorker[T]) *EventContext[T] { input := newIncomingRecord(record) ec := &EventContext[T]{ ctx: ctx, producerChan: make(chan EventContextProducer[T], 1), topicPartition: input.TopicPartition(), changeLog: changeLog, input: input, interjection: nil, asyncCompleter: pw.asyncCompleter, revocationWaiter: (*sync.WaitGroup)(sak.Noescape(unsafe.Pointer(&pw.revocationWaiter))), done: make(chan struct{}), } return ec } func newInterjectionContext[T StateStore](ctx context.Context, interjection *interjection[T], topicPartition TopicPartition, changeLog changeLogData[T], pw *partitionWorker[T]) *EventContext[T] { ec := &EventContext[T]{ ctx: ctx, producerChan: make(chan EventContextProducer[T], 1), topicPartition: topicPartition, interjection: interjection, changeLog: changeLog, asyncCompleter: pw.asyncCompleter, revocationWaiter: (*sync.WaitGroup)(sak.Noescape(unsafe.Pointer(&pw.revocationWaiter))), done: make(chan struct{}), } return ec }
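A hedged sketch of an event processor that uses the Forward/RecordChange/Store pattern documented above together with the JSON helpers from the codec file. counterEvent is hypothetical, and the store mutation itself is elided since the StateStore API is implementation-specific; the handler has the shape expected by RegisterEventType.

package example

import "github.com/aws/go-kafka-event-source/streams"

// counterEvent is a hypothetical payload type used only for this sketch.
type counterEvent struct {
	Key   string
	Delta int64
}

// handleCounter mutates local state (elided), records the change so it is
// replicated to the state store topic, and forwards a derived output record.
// All of these writes ride on the same transaction as the consumer offset.
func handleCounter[S streams.StateStore](ec *streams.EventContext[S], ev counterEvent) streams.ExecutionState {
	// ... update ec.Store() here; the store API depends on your implementation ...

	entry := streams.EncodeJsonChangeLogEntryValue("counter", ev)
	entry.WriteKeyString(ev.Key)
	ec.RecordChange(entry)

	out := streams.JsonItemEncoder("counterUpdated", ev)
	out.WriteKeyString(ev.Key)
	ec.Forward(out)

	return streams.Complete
}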
219
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "errors" "os" "os/signal" "sync" "syscall" "time" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/twmb/franz-go/pkg/kgo" ) var ErrPartitionNotAssigned = errors.New("partition is not assigned") var ErrPartitionNotReady = errors.New("partition is not ready") // EventSource provides an abstraction over raw kgo.Record/streams.IncomingRecord consumption, allowing the use of strongly typed event handlers. // One of the key features of the EventSource is to allow for the routing of events based off of a type header. See RegisterEventType for details. type EventSource[T StateStore] struct { rootProcessor *eventProcessorWrapper[T] tailProcessor *eventProcessorWrapper[T] stateStoreFactory StateStoreFactory[T] defaultProcessor EventProcessor[T, IncomingRecord] consumer *eventSourceConsumer[T] interjections []interjection[T] source *Source runStatus sak.RunStatus done chan struct{} metrics chan Metric stopOnce sync.Once } /* Create an EventSource. `defaultProcessor` will be invoked if a suitable EventProcessor can not be found, or the IncomingRecord has no RecordType header. `additionalClientoptions` allows you to add additional options to the underlying kgo.Client. There are some restrictions here however. The following options are reserved: kgo.Balancers kgo.ConsumerGroup kgo.ConsumeTopics kgo.OnPartitionsAssigned kgo.OnPartitionsRevoked kgo.AdjustFetchOffsetsFn In addition, if you wish to set a TopicPartitioner for use in EventContext.Forward(), the partitioner must be of the supplied [OptionalPartitioner] as StateStore entries require manual partitioning and are produced on the same client as used by the EventContext for producing records. The default partitioner is initialized as follows, which should give parity with the canonical Java murmur2 partitioner: kgo.RecordPartitioner(NewOptionalPartitioner(kgo.StickyKeyPartitioner(nil))) */ func NewEventSource[T StateStore](sourceConfig EventSourceConfig, stateStoreFactory StateStoreFactory[T], defaultProcessor EventProcessor[T, IncomingRecord], additionalClientOptions ...kgo.Opt) (*EventSource[T], error) { source, err := CreateSource(sourceConfig) if err != nil { return nil, err } var metrics chan Metric if source.config.MetricsHandler != nil { metrics = make(chan Metric, 2048) } es := &EventSource[T]{ defaultProcessor: defaultProcessor, stateStoreFactory: stateStoreFactory, source: source, runStatus: sak.NewRunStatus(context.Background()), done: make(chan struct{}, 1), metrics: metrics, } es.consumer, err = newEventSourceConsumer(es, additionalClientOptions...) return es, err } // ConsumeEvents starts the underlying Kafka consumer. This call is non-blocking, // so if called from main(), it should be followed by some other blocking call to prevent the application from exiting. // See [streams.EventSource.WaitForSignals] for an example. 
func (es *EventSource[T]) ConsumeEvents() {
	go es.emitMetrics()
	go es.consumer.start()
	go es.closeOnFail()
}

// Returns the [EventSourceState] of the underlying [Source], [Healthy] or [Unhealthy].
// When the EventSource encounters an unrecoverable error (unable to execute a transaction for example), it will enter an [Unhealthy] state.
// Intended to be used by a health check process for rolling back during a bad deployment.
func (es *EventSource[T]) State() EventSourceState {
	return es.source.State()
}

// The [Source] used by the EventSource.
func (es *EventSource[T]) Source() *Source {
	return es.source
}

func (es *EventSource[T]) closeOnFail() {
	err := <-es.source.failure
	log.Errorf("closing consumer due to failure: %v", err)
	// since the consumer will stop processing altogether
	// we want to immediately relinquish control of all partitions
	es.StopNow()
}

func (es *EventSource[T]) EmitMetric(m Metric) {
	if es.metrics != nil {
		select {
		case es.metrics <- m:
		default:
			log.Warnf("metrics channel full, unable to emit metrics: %+v", m)
		}
	}
}

func (es *EventSource[T]) emitMetrics() {
	if es.metrics == nil {
		return
	}
	handler := es.source.config.MetricsHandler
	for {
		select {
		case m := <-es.metrics:
			if m.ExecuteTime.IsZero() {
				m.ExecuteTime = m.StartTime
			}
			handler(m)
		case <-es.runStatus.Done():
			close(es.metrics)
			return
		}
	}
}

/*
WaitForSignals is a convenience function suitable for use in a main() function. Blocks until `signals` are received,
then gracefully closes the consumer by calling [streams.EventSource.Stop]. If `signals` are not provided, syscall.SIGINT and syscall.SIGHUP are used.
If `preHook` is non-nil, it will be invoked before Stop() is invoked. If the preHook returns false, this call continues to block.
If true is returned, `signal.Reset(signals...)` is invoked and the consumer shutdown process begins.

Simple example:

	func main(){
		myEventSource := initEventSource()
		myEventSource.ConsumeEvents()
		myEventSource.WaitForSignals(nil)
		fmt.Println("exiting")
	}

Prehook example:

	func main(){
		myEventSource := initEventSource()
		myEventSource.ConsumeEvents()
		myEventSource.WaitForSignals(func(s os.Signal) bool {
			fmt.Printf("starting shutdown from signal %v\n", s)
			shutDownSomeOtherProcess()
			return true
		})
		fmt.Println("exiting")
	}

In this example, the consumer will close on syscall.SIGINT or syscall.SIGHUP but not syscall.SIGUSR1:

	func main(){
		myEventSource := initEventSource()
		myEventSource.ConsumeEvents()
		myEventSource.WaitForSignals(func(s os.Signal) bool {
			if s == syscall.SIGUSR1 {
				fmt.Println("user signal received")
				performSomeTask()
				return false
			}
			return true
		}, syscall.SIGINT, syscall.SIGHUP, syscall.SIGUSR1)
		fmt.Println("exiting")
	}
*/
func (es *EventSource[T]) WaitForSignals(preHook func(os.Signal) bool, signals ...os.Signal) {
	if len(signals) == 0 {
		signals = []os.Signal{syscall.SIGINT, syscall.SIGHUP}
	}
	if preHook == nil {
		preHook = func(_ os.Signal) bool { return true }
	}
	c := make(chan os.Signal, 1)
	signal.Notify(c, signals...)
	for s := range c {
		if preHook(s) {
			signal.Reset(signals...)
			break
		}
	}
	es.Stop()
	<-es.Done()
}

/*
WaitForChannel is similar to WaitForSignals, but blocks on a `chan struct{}` then invokes `callback` when finished. Useful when you have multiple EventSources in a single application.
Example:

	func main() {
		myEventSource1 := initEventSource1()
		myEventSource1.ConsumeEvents()

		myEventSource2 := initEventSource2()
		myEventSource2.ConsumeEvents()

		wg := &sync.WaitGroup{}
		wg.Add(2)
		eventSourceChannel := make(chan struct{})
		go myEventSource1.WaitForChannel(eventSourceChannel, wg.Done)
		go myEventSource2.WaitForChannel(eventSourceChannel, wg.Done)

		osChannel := make(chan os.Signal, 1)
		signal.Notify(osChannel, syscall.SIGINT, syscall.SIGHUP)
		<-osChannel
		close(eventSourceChannel)
		wg.Wait()
		fmt.Println("exiting")
	}
*/
func (es *EventSource[T]) WaitForChannel(c chan struct{}, callback func()) {
	<-c
	es.Stop()
	<-es.Done()
	if callback != nil {
		callback()
	}
}

// Done blocks while the underlying Kafka consumer is active.
func (es *EventSource[T]) Done() <-chan struct{} {
	return es.done
}

// Signals the underlying *kgo.Client that the underlying consumer should exit the group.
// If you are using an IncrementalGroupRebalancer, this will trigger a graceful exit where owned partitions are surrendered
// according to its configuration. If you are not, this call has the same effect as [streams.EventSource.StopNow].
//
// Calls to Stop are not blocking. To block during the shutdown process, this call should be followed by `<-eventSource.Done()`
//
// To simplify running from main(), the [streams.EventSource.WaitForSignals] and [streams.EventSource.WaitForChannel] calls have been provided.
// So unless you have extremely complex application shutdown logic, you should not need to interact with this method directly.
func (es *EventSource[T]) Stop() {
	es.stopOnce.Do(func() {
		go func() {
			<-es.consumer.leave()
			es.runStatus.Halt() // will close all sub processes (commitLog, stateStoreConsumer)
			select {
			case es.done <- struct{}{}:
			default:
			}
		}()
	})
}

func (es *EventSource[T]) ForkRunStatus() sak.RunStatus {
	return es.runStatus.Fork()
}

// Immediately stops the underlying consumer *kgo.Client by invoking sc.client.Close()
// This has the effect of immediately surrendering all owned partitions, then closing the client.
// If you are using an IncrementalGroupRebalancer, this can be used as a force quit.
func (es *EventSource[T]) StopNow() {
	es.runStatus.Halt()
	es.consumer.stop()
	select {
	case es.done <- struct{}{}:
	default:
	}
}

/*
ScheduleInterjection sets a timer for `interjector` to be run `every` time interval,
plus or minus a random time.Duration not greater than the absolute value of `jitter` on every invocation.
`interjector` will have access to EventContext.Store() and can create/delete store items, or forward events,
just as a standard EventProcessor.

Example:

	func cleanupStaleItems(ec *EventContext[myStateStore], when time.Time) streams.ExecutionState {
		ec.Store().cleanup(when)
		return streams.Complete
	}
	// schedules cleanupStaleItems to be executed every 900ms - 1100ms
	eventSource.ScheduleInterjection(cleanupStaleItems, time.Second, 100 * time.Millisecond)
*/
func (es *EventSource[T]) ScheduleInterjection(interjector Interjector[T], every, jitter time.Duration) {
	es.interjections = append(es.interjections, interjection[T]{
		interjector: interjector,
		every:       every,
		jitter:      jitter,
	})
}

// Executes `cmd` in the context of the given partition.
func (es *EventSource[T]) Interject(partition int32, cmd Interjector[T]) <-chan error {
	return es.consumer.interject(partition, cmd)
}

/*
InterjectAll is a convenience function which allows you to Interject into every active partition assigned to the consumer
without creating an individual timer per partition.
The equivalent of calling Interject() on each active partition, blocking until all are performed. It is worth noting that
the interjections are run in parallel, so care must be taken not to create a deadlock between partitions via locking mechanisms such as a Mutex.
If parallel processing is not of concern, [streams.EventSource.InterjectAllSync] is an alternative.
Useful for gathering store statistics, but can be used in place of a standard Interjection.

Example:

	preCount := int64(0)
	postCount := int64(0)
	eventSource.InterjectAll(func (ec *EventContext[myStateStore], when time.Time) streams.ExecutionState {
		store := ec.Store()
		atomic.AddInt64(&preCount, int64(store.Len()))
		store.performBookkeepingTasks()
		atomic.AddInt64(&postCount, int64(store.Len()))
		return streams.Complete
	})
	fmt.Printf("Number of items before: %d, after: %d\n", preCount, postCount)
*/
func (es *EventSource[T]) InterjectAll(interjector Interjector[T]) {
	es.consumer.forEachChangeLogPartitionAsync(interjector)
}

/*
InterjectAllSync performs the same function as [streams.EventSource.InterjectAll], however it blocks on each iteration.
It may be useful if parallel processing is not of concern and you want to avoid locking on a shared data structure.

Example:

	itemCount := 0
	eventSource.InterjectAllSync(func (ec *EventContext[myStateStore], when time.Time) streams.ExecutionState {
		store := ec.Store()
		itemCount += store.Len()
		return streams.Complete
	})
	fmt.Println("Number of items: ", itemCount)
*/
func (es *EventSource[T]) InterjectAllSync(interjector Interjector[T]) {
	es.consumer.forEachChangeLogPartitionSync(interjector)
}

func (ec *EventSource[T]) createChangeLogReceiver(tp TopicPartition) T {
	return ec.stateStoreFactory(tp)
}

// Starts the event processing by invoking registered processors. If no processors exist for record.recordType, the defaultProcessor will be invoked.
func (es *EventSource[T]) handleEvent(ctx *EventContext[T], record IncomingRecord) ExecutionState {
	state := unknownType
	if es.rootProcessor != nil {
		state = es.rootProcessor.process(ctx, record)
	}
	if state == unknownType {
		state = es.defaultProcessor(ctx, record)
	}
	return state
}

// Registers an eventType with a transformer (usually a codec.Codec) and the supplied EventProcessor.
// Must not be called after `EventSource.ConsumeEvents()`
func RegisterEventType[T StateStore, V any](es *EventSource[T], transformer IncomingRecordDecoder[V], eventProcessor EventProcessor[T, V], eventType string) {
	ep := newEventProcessorWrapper(eventType, transformer, eventProcessor, es.source.deserializationErrorHandler())
	if es.rootProcessor == nil {
		es.rootProcessor, es.tailProcessor = ep, ep
	} else {
		es.tailProcessor.next = ep
		es.tailProcessor = ep
	}
}

// A convenience method to avoid chicken-and-egg scenarios when initializing an EventSource.
// Must not be called after `EventSource.ConsumeEvents()`
func RegisterDefaultHandler[T StateStore](es *EventSource[T], recordProcessor EventProcessor[T, IncomingRecord], eventType string) {
	es.defaultProcessor = recordProcessor
}

type eventExecutor[T any] interface {
	Exec(*EventContext[T], IncomingRecord) ExecutionState
}

// Wraps an EventProcessor with a function that decodes the record before invoking eventProcessor.
// Doing some type gymnastics here.
// We have 2 generic types declared here, but having an eventProcessorWrapper[T,V] with *next[T,X] would not work.
// Golang generics do not yet allow for defining new types in struct method declarations, so we have a private interface
// wrapped by a generic.
type eventProcessorWrapper[T any] struct { eventType string eventExecutor eventExecutor[T] next *eventProcessorWrapper[T] } type eventProcessorExecutor[T any, V any] struct { process EventProcessor[T, V] decode IncomingRecordDecoder[V] handleDeserializationError DeserializationErrorHandler } func (epe *eventProcessorExecutor[T, V]) Exec(ec *EventContext[T], record IncomingRecord) ExecutionState { if event, err := epe.decode(record); err == nil { return epe.process(ec, event) } else { if epe.handleDeserializationError(ec, record.RecordType(), err) == Continue { return Complete } return Incomplete } } func newEventProcessorWrapper[T any, V any](eventType string, decoder IncomingRecordDecoder[V], eventProcessor EventProcessor[T, V], deserializationErrorHandler DeserializationErrorHandler) *eventProcessorWrapper[T] { return &eventProcessorWrapper[T]{ eventType: eventType, eventExecutor: &eventProcessorExecutor[T, V]{ process: eventProcessor, decode: decoder, handleDeserializationError: deserializationErrorHandler, }, } } func (ep *eventProcessorWrapper[T]) exec(ec *EventContext[T], record IncomingRecord) ExecutionState { return ep.eventExecutor.Exec(ec, record) } // process the record if records.recordType == eventProcessorWrapper.eventType, otherwise forward this record to the next processor. func (ep *eventProcessorWrapper[T]) process(ctx *EventContext[T], record IncomingRecord) ExecutionState { if record.RecordType() == ep.eventType { return ep.exec(ctx, record) } if ep.next != nil { return ep.next.process(ctx, record) } return unknownType }
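A brief sketch of wiring a hand-rolled decoder into the processor chain described above, as an alternative to the JSON helpers used in the examples. This is not library code: `pingStore`, `recordLatency` and the "Ping" record type are hypothetical, it assumes IncomingRecord exposes the raw payload via Value(), and it assumes imports of "time", "encoding/json" and the streams package.

// Sketch only; the decoder just needs to satisfy IncomingRecordDecoder[V] = func(IncomingRecord) (V, error).
type pingEvent struct{ Sent time.Time }

func decodePing(r streams.IncomingRecord) (pingEvent, error) {
	var p pingEvent
	err := json.Unmarshal(r.Value(), &p) // any codec works; the wrapper only needs (V, error)
	return p, err
}

func handlePing(ec *streams.EventContext[pingStore], p pingEvent) streams.ExecutionState {
	ec.Store().recordLatency(time.Since(p.Sent)) // hypothetical store method
	return streams.Complete
}

// registration must happen before ConsumeEvents():
//   streams.RegisterEventType(eventSource, decodePing, handlePing, "Ping")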
446
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "testing" "time" "github.com/google/btree" ) func TestEventSourceInsert(t *testing.T) { if testing.Short() { t.Skip() return } itemCount := 10000 //must be multiple of 10 for this to work es, p, c := newTestEventSource() p.produceMany(t, "int", itemCount) es.ConsumeEvents() defer es.StopNow() p.waitForAllPartitions(t, c, defaultTestTimeout) trees := []*btree.BTreeG[intStoreItem]{} es.InterjectAllSync(func(ec *EventContext[intStore], _ time.Time) ExecutionState { tree := ec.Store().tree trees = append(trees, tree.Clone()) if tree.Len() != itemCount/10 { t.Errorf("incorrect number of items in partition. actual: %d, expected: %d", tree.Len(), itemCount/10) } return Complete }) if len(trees) != es.consumer.source.Config().NumPartitions { t.Errorf("incorrect number of stores. actual: %d, expected: %d", len(trees), es.consumer.source.Config().NumPartitions) } data := make(map[int]int) for _, tree := range trees { tree.Ascend(func(item intStoreItem) bool { if item.Key != item.Value { t.Errorf("incorrect item value. actual: %d, expected: %d", item.Value, item.Key) } if _, ok := data[item.Key]; ok { t.Errorf("duplicate key: %d", item.Key) } data[item.Key] = item.Value return true }) } if len(data) != itemCount { t.Errorf("incorrect item count. actual: %d, expected: %d", len(data), itemCount) } for i := 0; i < itemCount; i++ { if _, ok := data[i]; !ok { t.Errorf("missing key: %d", i) } } } func TestEventSourceDelete(t *testing.T) { if testing.Short() { t.Skip() return } itemCount := 10000 //must be multiple of 10 for this to work es, p, c := newTestEventSource() p.produceMany(t, "int", itemCount) p.delete(t, "int", 0) es.ConsumeEvents() defer es.StopNow() p.waitForAllPartitions(t, c, defaultTestTimeout) ij := func(ec *EventContext[intStore], _ time.Time) ExecutionState { targetCount := (itemCount / 10) - 1 if ec.Store().tree.Len() != targetCount { t.Errorf("incorrect item count. actual: %d, expected: %d", ec.Store().tree.Len(), targetCount) } return Complete } if err := <-es.Interject(0, ij); err != nil { t.Error(err) } p.deleteMany(t, "int", itemCount) p.waitForAllPartitions(t, c, defaultTestTimeout) trees := []*btree.BTreeG[intStoreItem]{} es.InterjectAllSync(func(ec *EventContext[intStore], _ time.Time) ExecutionState { tree := ec.Store().tree trees = append(trees, tree.Clone()) if tree.Len() != 0 { t.Errorf("incorrect number of items in partition. actual: %d, expected: %d", tree.Len(), 0) } return Complete }) if len(trees) != es.consumer.source.Config().NumPartitions { t.Errorf("incorrect number of stores. actual: %d, expected: %d", len(trees), es.consumer.source.Config().NumPartitions) } }
118
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams_test import ( "context" "fmt" "time" "github.com/aws/go-kafka-event-source/streams" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/aws/go-kafka-event-source/streams/stores" ) type Contact struct { Id string PhoneNumber string Email string FirstName string LastName string LastContact time.Time } type NotifyContactEvent struct { ContactId string NotificationType string } type EmailNotification struct { ContactId string Address string Payload string } func (c Contact) Key() string { return c.Id } func createContact(ctx *streams.EventContext[ContactStore], contact Contact) streams.ExecutionState { contactStore := ctx.Store() ctx.RecordChange(contactStore.Put(contact)) fmt.Printf("Created contact: %s\n", contact.Id) return streams.Complete } func deleteContact(ctx *streams.EventContext[ContactStore], contact Contact) streams.ExecutionState { contactStore := ctx.Store() if entry, ok := contactStore.Delete(contact); ok { ctx.RecordChange(entry) fmt.Printf("Deleted contact: %s\n", contact.Id) } return streams.Complete } func notifyContact(ctx *streams.EventContext[ContactStore], notification NotifyContactEvent) streams.ExecutionState { contactStore := ctx.Store() if contact, ok := contactStore.Get(notification.ContactId); ok { fmt.Printf("Notifying contact: %s by %s\n", contact.Id, notification.NotificationType) } else { fmt.Printf("Contact %s does not exist!\n", notification.ContactId) } return streams.Complete } // simply providing an example of how you might wrap the store into your own type type ContactStore struct { *stores.SimpleStore[Contact] } func NewContactStore(tp streams.TopicPartition) ContactStore { return ContactStore{stores.NewJsonSimpleStore[Contact](tp)} } var notificationScheduler *streams.AsyncJobScheduler[ContactStore, string, EmailNotification] func notifyContactAsync(ctx *streams.EventContext[ContactStore], notification NotifyContactEvent) streams.ExecutionState { contactStore := ctx.Store() if contact, ok := contactStore.Get(notification.ContactId); ok { fmt.Printf("Notifying contact: %s asynchronously by %s\n", contact.Id, notification.NotificationType) return notificationScheduler.Schedule(ctx, contact.Email, EmailNotification{ ContactId: contact.Id, Address: contact.Email, Payload: "sending you mail...from a computer!", }) } else { fmt.Printf("Contact %s does not exist!\n", notification.ContactId) } return streams.Complete } func sendEmailToContact(key string, notification EmailNotification) error { // note: the AsyncJobProcessor does not have access to the StateStore fmt.Printf("Processing an email job with key: '%s'. 
This may take some time, emails are tricky!\n", key) time.Sleep(500 * time.Millisecond) // simulating how long it might to send an email return nil } func emailToContactComplete(ctx *streams.EventContext[ContactStore], _ string, email EmailNotification, err error) streams.ExecutionState { // the AsyncJobFinalizer has access to the StateStore associated with this event contactStore := ctx.Store() if contact, ok := contactStore.Get(email.ContactId); ok { fmt.Printf("Notified contact: %s, address: %s, payload: '%s'\n", contact.Id, email.Address, email.Payload) contact.LastContact = time.Now() contactStore.Put(contact) } return streams.Complete } func ExampleEventSource() { streams.InitLogger(streams.SimpleLogger(streams.LogLevelError), streams.LogLevelError) contactsCluster := streams.SimpleCluster([]string{"127.0.0.1:9092"}) sourceConfig := streams.EventSourceConfig{ GroupId: "ExampleEventSourceGroup", Topic: "ExampleEventSource", NumPartitions: 10, SourceCluster: contactsCluster, } destination := streams.Destination{ Cluster: sourceConfig.SourceCluster, DefaultTopic: sourceConfig.Topic, } eventSource := sak.Must(streams.NewEventSource(sourceConfig, NewContactStore, nil)) streams.RegisterEventType(eventSource, streams.JsonItemDecoder[Contact], createContact, "CreateContact") streams.RegisterEventType(eventSource, streams.JsonItemDecoder[Contact], deleteContact, "DeleteContact") streams.RegisterEventType(eventSource, streams.JsonItemDecoder[NotifyContactEvent], notifyContact, "NotifyContact") eventSource.ConsumeEvents() contact := Contact{ Id: "123", PhoneNumber: "+18005551212", FirstName: "Billy", LastName: "Bob", } notification := NotifyContactEvent{ ContactId: "123", NotificationType: "email", } producer := streams.NewProducer(destination) createContactRecord := streams.JsonItemEncoder("CreateContact", contact) createContactRecord.WriteKeyString(contact.Id) deleteContactRecord := streams.JsonItemEncoder("DeleteContact", contact) deleteContactRecord.WriteKeyString(contact.Id) notificationRecord := streams.JsonItemEncoder("NotifyContact", notification) notificationRecord.WriteKeyString(notification.ContactId) producer.Produce(context.Background(), createContactRecord) producer.Produce(context.Background(), notificationRecord) producer.Produce(context.Background(), deleteContactRecord) producer.Produce(context.Background(), notificationRecord) eventSource.WaitForSignals(nil) // Expected Output: Created contact: 123 // Notifying contact: 123 by email // Deleted contact: 123 // Contact 123 does not exist! 
} func ExampleAsyncJobScheduler() { streams.InitLogger(streams.SimpleLogger(streams.LogLevelError), streams.LogLevelError) contactsCluster := streams.SimpleCluster([]string{"127.0.0.1:9092"}) sourceConfig := streams.EventSourceConfig{ GroupId: "ExampleAsyncJobSchedulerGroup", Topic: "ExampleAsyncJobScheduler", NumPartitions: 10, SourceCluster: contactsCluster, } destination := streams.Destination{ Cluster: sourceConfig.SourceCluster, DefaultTopic: sourceConfig.Topic, } eventSource := sak.Must(streams.NewEventSource(sourceConfig, NewContactStore, nil)) streams.RegisterEventType(eventSource, streams.JsonItemDecoder[Contact], createContact, "CreateContact") streams.RegisterEventType(eventSource, streams.JsonItemDecoder[NotifyContactEvent], notifyContactAsync, "NotifyContact") notificationScheduler = sak.Must(streams.CreateAsyncJobScheduler(eventSource, sendEmailToContact, emailToContactComplete, streams.DefaultConfig)) eventSource.ConsumeEvents() contact := Contact{ Id: "123", Email: "[email protected]", PhoneNumber: "+18005551212", FirstName: "Billy", LastName: "Bob", } notification := NotifyContactEvent{ ContactId: "123", NotificationType: "email", } producer := streams.NewProducer(destination) createContactRecord := streams.JsonItemEncoder("CreateContact", contact) createContactRecord.WriteKeyString(contact.Id) notificationRecord := streams.JsonItemEncoder("NotifyContact", notification) notificationRecord.WriteKeyString(notification.ContactId) producer.Produce(context.Background(), createContactRecord) producer.Produce(context.Background(), notificationRecord) eventSource.WaitForSignals(nil) // Expected Output: Created contact: 123 // Notifying contact: 123 asynchronously by email // Processing an email job with key: '[email protected]'. This may take some time, emails are tricky! // Notified contact: 123, address: [email protected], payload: 'sending you mail...from a computer!' 
} // func ExampleAsyncBatcher() { // streams.InitLogger(streams.SimpleLogger(streams.LogLevelError), streams.LogLevelError) // var contactsCluster = streams.SimpleCluster([]string{"127.0.0.1:9092"}) // var source = streams.Source{ // GroupId: "ExampleAsyncJobSchedulerGroup", // Topic: "ExampleAsyncJobScheduler", // NumPartitions: 10, // SourceCluster: contactsCluster, // } // var destination = streams.Destination{ // Cluster: source.SourceCluster, // DefaultTopic: source.Topic, // } // source, err := streams.CreateSource(source) // if err != nil { // panic(err) // } // eventSource, err := streams.NewEventSource(source, NewContactStore, nil) // if err != nil { // panic(err) // } // streams.RegisterEventType(eventSource, streams.JsonItemDecoder[Contact], createContact, "CreateContact") // streams.RegisterEventType(eventSource, streams.JsonItemDecoder[NotifyContactEvent], notifyContactAsync, "NotifyContact") // notificationScheduler, err = streams.CreateAsyncJobScheduler(eventSource, // sendEmailToContact, emailToContactComplete, streams.DefaultConfig) // if err != nil { // panic(err) // } // wg.Add(3) // we're expecting 3 records in this example // eventSource.ConsumeEvents() // contact := Contact{ // Id: "123", // Email: "[email protected]", // PhoneNumber: "+18005551212", // FirstName: "Billy", // LastName: "Bob", // } // notification := NotifyContactEvent{ // ContactId: "123", // NotificationType: "email", // } // producer := streams.NewProducer(destination) // createContactRecord := streams.JsonItemEncoder("CreateContact", contact) // createContactRecord.WriteKeyString(contact.Id) // notificationRecord := streams.JsonItemEncoder("NotifyContact", notification) // notificationRecord.WriteKeyString(notification.ContactId) // producer.Produce(context.Background(), createContactRecord) // producer.Produce(context.Background(), notificationRecord) // wg.Wait() // eventSource.Stop() // <-eventSource.Done() // // cleaning up our local Kafka cluster // // you probably don't want to delete your topic // streams.DeleteSource(source) // // Output: Created contact: 123 // // Notifying contact: 123 asynchronously by email // // Processing an email job with key: '[email protected]'. This may take some time, emails are tricky! // // Notified contact: 123, address: [email protected], payload: 'sending you mail...from a computer!' // }
309
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package streams

import "time"

// Defines a method which accepts a TopicPartition argument and returns T
type TopicPartitionCallback[T any] func(TopicPartition) T

// A callback invoked when a new TopicPartition has been assigned to an EventSource. Your callback should return an empty StateStore.
type StateStoreFactory[T StateStore] TopicPartitionCallback[T]

// A callback invoked when a new record has been received from the EventSource.
type IncomingRecordDecoder[V any] func(IncomingRecord) (V, error)

// A callback invoked when a new record has been received from the EventSource, after it has been transformed via IncomingRecordTransformer.
type EventProcessor[T any, V any] func(*EventContext[T], V) ExecutionState

type SourcePartitionEventHandler func(*Source, int32)

type MetricsHandler func(Metric)

type DeserializationErrorHandler func(ec ErrorContext, eventType string, err error) ErrorResponse

type TxnErrorHandler func(err error) ErrorResponse

// A handler invoked when a previously scheduled AsyncJob should be performed.
type AsyncJobProcessor[K comparable, V any] func(K, V) error

// A callback invoked when a previously scheduled AsyncJob has been completed.
type AsyncJobFinalizer[T any, K comparable, V any] func(*EventContext[T], K, V, error) ExecutionState

type BatchCallback[S any, K comparable, V any] func(*EventContext[S], *BatchItems[S, K, V]) ExecutionState

type BatchExecutor[K comparable, V any] func(batch []*BatchItem[K, V])

type BatchProducerCallback[S any] func(eventContext *EventContext[S], records []*Record, userData any) ExecutionState

// Defines the method signature needed by the EventSource to perform a stream interjection. See EventSource.Interject.
type Interjector[T any] func(*EventContext[T], time.Time) ExecutionState
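A small sketch showing how two of the callback types above might be implemented by an application. It is not library code: the `Continue` ErrorResponse value is the one compared against in the deserialization path in event_source.go, `flushableStore` and its Flush method are hypothetical, and imports of "fmt", "time" and the streams package are assumed.

// A DeserializationErrorHandler that logs and skips bad records.
func skipBadRecords(ec streams.ErrorContext, eventType string, err error) streams.ErrorResponse {
	fmt.Printf("could not decode %s record: %v\n", eventType, err)
	return streams.Continue // drop the record and keep consuming
}

// An Interjector that periodically flushes a hypothetical store.
func flushInterjector(ec *streams.EventContext[flushableStore], when time.Time) streams.ExecutionState {
	ec.Store().Flush(when) // hypothetical method on the store
	return streams.Complete
}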
53
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/twmb/franz-go/pkg/kgo" ) type CleanupPolicy int const ( CompactCleanupPolicy CleanupPolicy = iota DeleteCleanupPolicy ) type ChangeLogReceiver interface { ReceiveChange(IncomingRecord) error } // A GlobalChangeLog is simply a consumer which continously consumes all partitions within the given topic and // forwards all records to it's StateStore. GlobalChangeLogs can be useful for sharing small amounts of data between // a group of hosts. For example, GKES uses a global change log to keep track of consumer group offsets. type GlobalChangeLog[T ChangeLogReceiver] struct { receiver T client *kgo.Client runStatus sak.RunStatus numPartitions int topic string cleanupPolicy CleanupPolicy } // Creates a NewGlobalChangeLog consumer and forward all records to `receiver`. func NewGlobalChangeLog[T ChangeLogReceiver](cluster Cluster, receiver T, numPartitions int, topic string, cleanupPolicy CleanupPolicy) GlobalChangeLog[T] { return NewGlobalChangeLogWithRunStatus(sak.NewRunStatus(context.Background()), cluster, receiver, numPartitions, topic, cleanupPolicy) } // Creates a NewGlobalChangeLog consumer and forward all records to `receiver`. func NewGlobalChangeLogWithRunStatus[T ChangeLogReceiver](runStatus sak.RunStatus, cluster Cluster, receiver T, numPartitions int, topic string, cleanupPolicy CleanupPolicy) GlobalChangeLog[T] { assignments := make(map[int32]kgo.Offset) for i := 0; i < numPartitions; i++ { assignments[int32(i)] = kgo.NewOffset().AtStart() } client, err := NewClient( cluster, kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{ topic: assignments, }), kgo.RecordPartitioner(kgo.ManualPartitioner()), ) if err != nil { panic(err) } return GlobalChangeLog[T]{ runStatus: runStatus, client: client, receiver: receiver, numPartitions: numPartitions, topic: topic, cleanupPolicy: cleanupPolicy, } } // Pauses consumption of all partitions. func (cl GlobalChangeLog[T]) PauseAllPartitions() { allPartitions := make([]int32, int(cl.numPartitions)) for i := range allPartitions { allPartitions[i] = int32(i) } cl.client.PauseFetchPartitions(map[string][]int32{cl.topic: allPartitions}) } // Pauses consumption of a partition. func (cl GlobalChangeLog[T]) Pause(partition int32) { cl.client.PauseFetchPartitions(map[string][]int32{cl.topic: {partition}}) } // Resumes consumption of a partition at offset. 
func (cl GlobalChangeLog[T]) ResumePartitionAt(partition int32, offset int64) { cl.client.SetOffsets(map[string]map[int32]kgo.EpochOffset{ cl.topic: {partition: kgo.EpochOffset{ Offset: offset, Epoch: -1, }}, }) cl.client.ResumeFetchPartitions(map[string][]int32{cl.topic: {partition}}) } func (cl GlobalChangeLog[T]) Stop() { cl.runStatus.Halt() } func (cl GlobalChangeLog[T]) Start() { go cl.consume() } func (cl GlobalChangeLog[T]) consume() { for cl.runStatus.Running() { ctx, f := pollConsumer(cl.client) if f.IsClientClosed() { log.Debugf("GlobalChangeLog client closed") return } for _, err := range f.Errors() { if err.Err != ctx.Err() { log.Errorf("%v", err) } } f.EachRecord(cl.forwardChange) } log.Debugf("GlobalChangeLog halted") cl.client.Close() } func (cl GlobalChangeLog[T]) forwardChange(r *kgo.Record) { ir := newIncomingRecord(r) if err := cl.receiver.ReceiveChange(ir); err != nil { log.Errorf("GlobalChangeLog error for %+v, offset: %d, recordType: %s, error: %v", ir.TopicPartition(), ir.Offset(), ir.RecordType(), err) } }
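A brief sketch of a ChangeLogReceiver that mirrors records into an in-memory map, wired into a GlobalChangeLog. This is illustrative only: the cluster address, topic name and partition count are placeholders, and it assumes IncomingRecord exposes Key()/Value() accessors as well as imports of "sync" and the streams package.

type offsetMirror struct {
	mu   sync.Mutex
	data map[string][]byte
}

func (m *offsetMirror) ReceiveChange(r streams.IncomingRecord) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.data[string(r.Key())] = r.Value() // assumed accessors on IncomingRecord
	return nil
}

func runMirror() {
	cluster := streams.SimpleCluster([]string{"127.0.0.1:9092"})
	mirror := &offsetMirror{data: map[string][]byte{}}
	cl := streams.NewGlobalChangeLog(cluster, mirror, 10, "example-changelog", streams.CompactCleanupPolicy)
	cl.Start() // consumes all 10 partitions from the start and forwards every record to mirror
	// ... later: cl.Stop()
}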
137
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "math/rand" "github.com/google/btree" "github.com/twmb/franz-go/pkg/kgo" "github.com/twmb/franz-go/pkg/kmsg" ) // sort function for inactiveMembers btree // by sorting by leftAt, we drain one host at a time func incrGroupInactiveLess(a, b *incrGroupMember) bool { res := a.meta.LeftAt - b.meta.LeftAt if res != 0 { return res < 0 } return a.member.MemberID < b.member.MemberID } // sort function for activeMembers btree func incrGroupActiveLess(a, b *incrGroupMember) bool { res := a.activeAndPendingAssignments() - b.activeAndPendingAssignments() if res != 0 { return res < 0 } // we likely have partitions in receivership, so we have less priority // meaning we are les likely to donate a partition res = (a.donatable.Len() + a.donating.Len()) - (b.donatable.Len() + b.donating.Len()) if res != 0 { return res < 0 } return a.member.MemberID < b.member.MemberID } // withoutPartition - make our topic partition list act like a set without the headache of converting // deserialized array into a set and vice versa // the `n` is small here, so it's not a performance issue func withoutPartition(tps []TopicPartition, partition int32) []TopicPartition { newTps := make([]TopicPartition, 0, len(tps)) for _, tp := range tps { if tp.Partition != partition { newTps = append(newTps, tp) } } return newTps } type groupState struct { inReceivership *btree.BTreeG[int32] inTransition *btree.BTreeG[int32] unassigned map[int32]struct{} assigned map[int32]*incrGroupMember activeMembers *btree.BTreeG[*incrGroupMember] inactiveMembers *btree.BTreeG[*incrGroupMember] preparing map[TopicPartition]*incrGroupMember ready map[TopicPartition]*incrGroupMember members map[string]*incrGroupMember partitionCount int32 topic string } func newGroupState(cb *kgo.ConsumerBalancer, partitionCount int32, topic string) groupState { gs := groupState{ inReceivership: btree.NewOrderedG[int32](16), inTransition: btree.NewOrderedG[int32](16), unassigned: make(map[int32]struct{}, partitionCount), assigned: make(map[int32]*incrGroupMember), activeMembers: btree.NewG(16, incrGroupActiveLess), inactiveMembers: btree.NewG(16, incrGroupInactiveLess), preparing: make(map[TopicPartition]*incrGroupMember), ready: make(map[TopicPartition]*incrGroupMember), members: make(map[string]*incrGroupMember), partitionCount: partitionCount, topic: topic, } for i := int32(0); i < partitionCount; i++ { gs.unassigned[i] = struct{}{} } cb.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { incrMem := &incrGroupMember{ member: member, assignments: btree.NewOrderedG[int32](16), donatable: btree.NewOrderedG[int32](16), donating: btree.NewOrderedG[int32](16), // if meta data is not supplied, assume an active member meta: IncrGroupMemberMeta{Status: ActiveMember}, topic: topic, } gs.members[member.MemberID] = incrMem gs.initReceivership(incrMem, member, meta) gs.initAssignments(incrMem, meta) 
}) // if a partition is in receivership, it is not eligible to be donated // we need to adjust mem.donatable by subtraction everthing scheduled to be moved gs.inReceivership.Ascend(func(p int32) bool { if mem, ok := gs.assigned[p]; ok { mem.donatable.Delete(p) mem.donating.ReplaceOrInsert(p) } return true }) // now that we have initial state set, our sort is stable and we can insrt into our btrees for _, incrMem := range gs.members { if incrMem.meta.Status == InactiveMember { gs.inactiveMembers.ReplaceOrInsert(incrMem) } else { gs.activeMembers.ReplaceOrInsert(incrMem) } } log.Debugf("activeMembers: %d, inactiveMembers: %d", gs.activeMembers.Len(), gs.inactiveMembers.Len()) return gs } func (gs groupState) initAssignments(incrMem *incrGroupMember, meta *kmsg.ConsumerMemberMetadata) { for _, owned := range meta.OwnedPartitions { if gs.topic == owned.Topic { for _, p := range owned.Partitions { delete(gs.unassigned, p) if owner, ok := gs.assigned[p]; ok { // somehow we ended up double assigning a partiton ...going to take some investigation // we'll probably need to do some more complex collision detection here // this seems to happen when a member leaves mid-rebalance // for now, we will just reject this, but it may cause some latency spikes if we're using state stores // update: this seems to have been fixed by ensuring that inactive members with Preparing or Ready partitions // are not counted for the purposes for being `In Receivership`. see comments in `initReceivership()` // However, let's leave this here in case there are other oddball conditions log.Errorf("GroupState collision for partition: %d, rejected: %s, owner: %s", p, incrMem.member.MemberID, owner.member.MemberID) } else { gs.assigned[p] = incrMem incrMem.assignments.ReplaceOrInsert(p) incrMem.donatable.ReplaceOrInsert(p) } } // we're breaking because all partitions will have the same assignment // for all topics in this iteration of the groupState // if we continue, we may will up with duplicates break } } } func (gs groupState) initReceivership(incrMem *incrGroupMember, member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { if len(meta.UserData) > 0 { // will populate the correct member status if supplied if err := json.Unmarshal(meta.UserData, &incrMem.meta); err != nil { log.Errorf("%v", err) } else if incrMem.meta.Status == ActiveMember { // only put Ready/Preparing in receivership if the member is active // they member may have left mid cycle for _, tp := range incrMem.meta.Preparing { gs.preparing[tp] = incrMem } for _, tp := range incrMem.meta.Ready { gs.ready[tp] = incrMem } for _, tp := range incrMem.meta.Preparing { gs.inReceivership.ReplaceOrInsert(tp.Partition) } for _, tp := range incrMem.meta.Ready { gs.inReceivership.ReplaceOrInsert(tp.Partition) } } else { incrMem.instructions.Forget = append(incrMem.instructions.Forget, incrMem.meta.Preparing...) incrMem.instructions.Forget = append(incrMem.instructions.Forget, incrMem.meta.Ready...) 
} } log.Debugf("receievd %s, userMeta: %+v", incrMem.member.MemberID, incrMem.meta) } func (gs groupState) printGroupState(label string) { log.Debugf(label) min, _ := gs.activeMembers.Min() max, _ := gs.activeMembers.Max() log.Debugf("min: ", min.member.MemberID) log.Debugf("max: ", max.member.MemberID) gs.activeMembers.Ascend(func(incrMem *incrGroupMember) bool { log.Debugf("%v", incrMem) return true }) gs.inactiveMembers.Ascend(func(incrMem *incrGroupMember) bool { log.Debugf("%v", incrMem) return true }) log.Debugf(label) } func (gs groupState) balance(shifts int) bool { // first enure that all unassigned partitions find a home gs.printGroupState("intial goup state") gs.distributeUnassigned() // if there are any scheduled moves, make the assignment gs.printGroupState("post distributeUnassigned group state") gs.deliverScheduledPartitionMoves() // now schedule future moves // tell the recipient to prepare for the partitions // when it is ready, it will trigger another rebalance // if we've already moved our budget of partitions, this will be a no-op // until the next scheduled rebalance, time determined by configuration log.Debugf("schedulePartitionMoves - budget: %d, (max: %d, inReceivership: %d, inTransition: %d)", shifts-gs.inReceivership.Len(), shifts, gs.inReceivership.Len(), gs.inTransition.Len()) gs.printGroupState("post deliverScheduledPartitionMoves group state") return gs.schedulePartitionMoves(shifts - gs.inReceivership.Len() - gs.inTransition.Len()) } func (gs groupState) assignToActiveMember(partition int32, recipient *incrGroupMember) { tp := ntp(partition, gs.topic) log.Debugf("assignToActiveMember: %v, partition: %d", recipient, partition) delete(gs.ready, tp) delete(gs.preparing, tp) gs.activeMembers.Delete(recipient) recipient.assignments.ReplaceOrInsert(partition) recipient.donatable.ReplaceOrInsert(partition) recipient.meta.Preparing = withoutPartition(recipient.meta.Preparing, partition) recipient.meta.Ready = withoutPartition(recipient.meta.Ready, partition) recipient.instructions.Prepare = withoutPartition(recipient.instructions.Prepare, partition) gs.activeMembers.ReplaceOrInsert(recipient) gs.inReceivership.ReplaceOrInsert(partition) gs.inTransition.ReplaceOrInsert(partition) } func (gs groupState) instructActiveMemberToPrepareFor(partition int32, recipient *incrGroupMember) { log.Debugf("instructActiveMemberToPrepareFor: %v, partition: %d", recipient, partition) gs.activeMembers.Delete(recipient) recipient.instructions.Prepare = append(recipient.instructions.Prepare, ntp(partition, gs.topic)) gs.activeMembers.ReplaceOrInsert(recipient) } func (gs groupState) unassignFromDonor(partition int32, theDonor *incrGroupMember) { log.Debugf("unassignFromDonor: %v, partition: %d", theDonor, partition) if _, ok := gs.inactiveMembers.Get(theDonor); ok { theDonor.assignments.Delete(partition) theDonor.donatable.Delete(partition) theDonor.donating.ReplaceOrInsert(partition) } else { // gs.activeMembers.Delete(theDonor) theDonor.assignments.Delete(partition) theDonor.donatable.Delete(partition) theDonor.donating.ReplaceOrInsert(partition) // gs.activeMembers.ReplaceOrInsert(theDonor) } } func (gs groupState) distributeUnassigned() int { if gs.activeMembers.Len() == 0 { return 0 } for p := range gs.unassigned { // start here. 
If there was an abnormal shutdown, nobidy may be ready for this partition // in this case, we're sending the partition to the consumer with the least #partitions recipient, _ := gs.activeMembers.Min() // first check to see if there is any member who is ready, or getting ready for this partition gs.activeMembers.Ascend(func(candidate *incrGroupMember) bool { if candidate.meta.isPreparingOrReadyFor(p, gs.topic) { recipient = candidate return false } return true }) gs.assignToActiveMember(p, recipient) } return len(gs.unassigned) } func (gs groupState) deliverScheduledPartitionMoves() { // we're assigning all topics for the same partition fullyReady := make(map[int32]*incrGroupMember) for tp, incrMem := range gs.ready { // check to see if it is ready for all topics in the case of a multi-topic consumer if incrMem.meta.isReadyFor(tp.Partition, gs.topic) { fullyReady[tp.Partition] = incrMem } } for partition, recipient := range fullyReady { if donor, ok := gs.assigned[partition]; ok { gs.unassignFromDonor(partition, donor) gs.assignToActiveMember(partition, recipient) } } } func randomPartition(donor *incrGroupMember) int32 { i := rand.Intn(donor.donatable.Len()) current := 0 var p int32 donor.donatable.Ascend(func(item int32) bool { if current == i { p = item return false } current++ return true }) return p } func memberAt(tree *btree.BTreeG[*incrGroupMember], index int) (mem *incrGroupMember, ok bool) { i := 0 tree.Ascend(func(item *incrGroupMember) bool { if index == i { mem = item ok = true return false } i++ return true }) return } // begins the transition process for a partition // the partition is chososen randomly from the donor // some extra logic may be needed here to make sure we're not donating a partition that is in transition already // this could only happen if our maxIntranstionPartions > 1 which is not recommended func (gs groupState) donatePartition(donor *incrGroupMember, recipient *incrGroupMember) { _, deleted := gs.activeMembers.Delete(donor) p := randomPartition(donor) if deleted { gs.activeMembers.ReplaceOrInsert(donor) } log.Debugf("donatePartition donor: %v, recipient: %v, partition: %d", donor, recipient, p) gs.instructActiveMemberToPrepareFor(p, recipient) } func (gs groupState) schedulePartitionMoves(budget int) (shouldRebalanceAgain bool) { if gs.activeMembers.Len() == 0 { return } // we just actively assigned partions, but the move has not occurred yet // remove the in transition partitions from the budget, or we will move partitions // too quickly for i := 0; budget > 0; { if gs.inactiveMembers.Len() == 0 { break } if donor, ok := memberAt(gs.inactiveMembers, i); ok { if donor.donatable.Len() == 0 { i++ continue } recipient, _ := gs.activeMembers.Min() gs.donatePartition(donor, recipient) budget-- } else { break } } var donor *incrGroupMember var recipient *incrGroupMember for budget > 0 { shouldRebalanceAgain, donor, recipient = gs.isImbalanced() if !shouldRebalanceAgain { break } gs.donatePartition(donor, recipient) budget-- } return } func (gs *groupState) isImbalanced() (shouldRebalance bool, donor *incrGroupMember, recipient *incrGroupMember) { if gs.activeMembers.Len() < 2 { return false, nil, nil } targetCount := int(gs.partitionCount) / gs.activeMembers.Len() donor, _ = gs.activeMembers.Max() recipient, _ = gs.activeMembers.Min() shouldRebalance = // firstly: the donor has something.donate donor.donatable.Len() > 1 && // secondly: does th recipient have fewer assignemnts than the target count // Also, if we have a remainder, we could still be imbalanced 
as follows

	/*
		example: 20 partitions and 3 consumers yields a targetCount of 6; we could end up in the following state:
		A -> 6
		B -> 6
		C -> 8

		to fix this, also check donor.activeAndPendingAssignments() > targetCount+1, which will yield:
		A -> 6
		B -> 7
		C -> 7
		as close to balanced as possible
	*/
	(recipient.activeAndPendingAssignments() < targetCount || donor.activeAndPendingAssignments() > targetCount+1)
	return
}
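An illustrative, standalone sketch of the balance check above: with integer division the per-member target is partitionCount / activeMembers, and the group is treated as imbalanced when the smallest member sits below the target or the largest sits more than one above it. The numbers mirror the 20-partition / 3-consumer example in the comment; this is not library code and assumes an import of "fmt".

func isImbalancedExample() {
	partitionCount, members := 20, 3
	target := partitionCount / members // 6
	counts := []int{6, 6, 8}           // A, B, C
	min, max := counts[0], counts[0]
	for _, c := range counts {
		if c < min {
			min = c
		}
		if c > max {
			max = c
		}
	}
	imbalanced := min < target || max > target+1
	fmt.Println(target, imbalanced) // 6 true -> one more partition should move, ending at 6/7/7
}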
413
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package streams

import (
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/google/btree"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

const IncrementalCoopProtocol = "incr_coop"

// The IncrementalGroupRebalancer interface is an extension to kgo.GroupBalancer. This balancer allows for slowly moving partitions during
// consumer topology changes. This helps reduce blast radius in the case of failures, as well as keeping the inherent latency penalty of transitioning partitions to a minimum.
type IncrementalGroupRebalancer interface {
	kgo.GroupBalancer
	// Must be called by the InstructionHandler once a TopicPartition is ready for consumption
	PartitionPrepared(TopicPartition)
	// Must be called by the InstructionHandler if it fails to prepare a TopicPartition it was previously instructed to prepare
	PartitionPreparationFailed(TopicPartition)
	// Must be called by the InstructionHandler once it receives an assignment
	PartitionsAssigned(...TopicPartition)
	// Must be called by the InstructionHandler if it wishes to leave the consumer group in a graceful fashion
	GracefullyLeaveGroup() <-chan struct{}
}

// The status of a consumer group member.
type MemberStatus int const ( ActiveMember MemberStatus = iota InactiveMember Defunct ) type IncrGroupMemberInstructions struct { Prepare []TopicPartition Forget []TopicPartition // not currently used } type IncrGroupPartitionState struct { Preparing []TopicPartition Ready []TopicPartition } type IncrGroupMemberMeta struct { Preparing []TopicPartition Ready []TopicPartition Status MemberStatus LeftAt int64 } func (igmm IncrGroupMemberMeta) isPreparingOrReadyFor(partition int32, topic string) bool { return igmm.isReadyFor(partition, topic) || igmm.isPreparingFor(partition, topic) } func (igmm IncrGroupMemberMeta) isReadyFor(partition int32, topic string) bool { return containsTopicPartition(partition, topic, igmm.Ready) } func (igmm IncrGroupMemberMeta) isPreparingFor(partition int32, topic string) bool { return containsTopicPartition(partition, topic, igmm.Preparing) } func containsTopicPartition(partition int32, topic string, tps []TopicPartition) bool { tp := ntp(partition, topic) for _, candidate := range tps { if candidate == tp { return true } } return false } type planWrapper struct { plan map[string]map[string][]int32 instructions map[string]*IncrGroupMemberInstructions } type incrGroupMember struct { member *kmsg.JoinGroupResponseMember meta IncrGroupMemberMeta assignments *btree.BTreeG[int32] donatable *btree.BTreeG[int32] donating *btree.BTreeG[int32] topic string instructions IncrGroupMemberInstructions } func (igm *incrGroupMember) String() string { status := "ACTIVE" if igm.meta.Status == InactiveMember { status = "INACTIVE" } return fmt.Sprintf( "{Member: %v, Status: %s, Assigned: %v, Donatable: %v, Donating: %v, Preparing: %v, Ready: %v, ToPrepare: %v, SortValue: %v}", igm.member.MemberID, status, igm.assignments.Len(), igm.donatable.Len(), igm.donating.Len(), len(igm.meta.Preparing), len(igm.meta.Ready), len(igm.instructions.Prepare), igm.activeAndPendingAssignments()) } func (igm *incrGroupMember) activeAndPendingAssignments() int { // assignments are by partition and Preparing and Tready by TopicPartition // so determine the count by TopicPartition ... hence multiply assignments by len(topics) return igm.donatable.Len() + igm.donating.Len() + len(igm.meta.Preparing) + len(igm.meta.Ready) } type balanceWrapper struct { consumerBalancer *kgo.ConsumerBalancer } func (bw balanceWrapper) Balance(topic map[string]int32) kgo.IntoSyncAssignment { return bw.consumerBalancer.Balance(topic) } func (bw balanceWrapper) BalanceOrError(topic map[string]int32) (kgo.IntoSyncAssignment, error) { return bw.consumerBalancer.BalanceOrError(topic) } type incrementalBalanceController struct { budget int instructionHandler IncrRebalanceInstructionHandler } func (ib incrementalBalanceController) Balance(cb *kgo.ConsumerBalancer, topicData map[string]int32) kgo.IntoSyncAssignment { start := time.Now() defer func() { log.Debugf("Balance took %v", time.Since(start)) }() plan := cb.NewPlan() instructionsByMemberId := make(map[string]*IncrGroupMemberInstructions) for topic, partitionCount := range topicData { gs, imbalanced := ib.balanceTopic(cb, plan, partitionCount, topic) log.Infof("Group for %s is balanced: %v", topic, !imbalanced) for memId, incrMem := range gs.members { if instructions, ok := instructionsByMemberId[memId]; ok { instructions.Prepare = append(instructions.Prepare, incrMem.instructions.Prepare...) 
} else { instructions := new(IncrGroupMemberInstructions) *instructions = incrMem.instructions instructionsByMemberId[memId] = instructions } } } // plan.AdjustCooperative will make the balance a 2 step phase, // first revoke any assignments that were moved // this will force another rebalance request // at which time they will be assigned to the proper receiver plan.AdjustCooperative(cb) return planWrapper{plan.AsMemberIDMap(), instructionsByMemberId} } func (ib incrementalBalanceController) balanceTopic(cb *kgo.ConsumerBalancer, plan *kgo.BalancePlan, partitionCount int32, topic string) (groupState, bool) { gs := newGroupState(cb, partitionCount, topic) log.Infof("balancing %s, partitions: %d, member state - activeMembers: %d, inactiveMembers: %d", topic, partitionCount, gs.activeMembers.Len(), gs.inactiveMembers.Len()) if gs.activeMembers.Len() == 0 { return gs, false } imbalanced := gs.balance(ib.budget) // finally add all our decisions to the balance plan gs.inactiveMembers.Ascend(func(mem *incrGroupMember) bool { // log.Debugf("assigned for inactive member: %d", items[0].assignments.Len()) addMemberToPlan(plan, mem) return true }) gs.activeMembers.Ascend(func(mem *incrGroupMember) bool { addMemberToPlan(plan, mem) return true }) return gs, imbalanced } func (pw planWrapper) IntoSyncAssignment() []kmsg.SyncGroupRequestGroupAssignment { kassignments := make([]kmsg.SyncGroupRequestGroupAssignment, 0, len(pw.plan)) for member, assignment := range pw.plan { var kassignment kmsg.ConsumerMemberAssignment instructions := pw.instructions[member] instructionBytes, _ := json.Marshal(instructions) kassignment.UserData = instructionBytes for topic, partitions := range assignment { sort.Slice(partitions, func(i, j int) bool { return partitions[i] < partitions[j] }) assnTopic := kmsg.NewConsumerMemberAssignmentTopic() assnTopic.Topic = topic assnTopic.Partitions = partitions kassignment.Topics = append(kassignment.Topics, assnTopic) } sort.Slice(kassignment.Topics, func(i, j int) bool { return kassignment.Topics[i].Topic < kassignment.Topics[j].Topic }) syncAssn := kmsg.NewSyncGroupRequestGroupAssignment() syncAssn.MemberID = member syncAssn.MemberAssignment = kassignment.AppendTo(nil) kassignments = append(kassignments, syncAssn) } sort.Slice(kassignments, func(i, j int) bool { return kassignments[i].MemberID < kassignments[j].MemberID }) return kassignments } func addMemberToPlan(plan *kgo.BalancePlan, mem *incrGroupMember) { partitions := make([]int32, 0, mem.assignments.Len()) mem.assignments.Ascend(func(key int32) bool { partitions = append(partitions, key) return true }) plan.AddPartitions(mem.member, mem.topic, partitions) } type incrementalRebalancer struct { balancerController incrementalBalanceController quitChan chan struct{} gracefulChan chan struct{} memberStatus MemberStatus leaveTime int64 preparing TopicPartitionSet ready TopicPartitionSet instructionHandler IncrRebalanceInstructionHandler statusLock sync.Mutex } // Defines the interface needed for the IncrementalGroupRebalancer to function. // EventSource fulfills this interface. If you are using EventSource, there is nothing else for you to implement. type IncrRebalanceInstructionHandler interface { // Called by the IncrementalGroupRebalancer. Signals the instruction handler that this partition is destined for this consumer. // In the case of the EventSource, prepartion involves pre-populating the StateStore for this partition. PrepareTopicPartition(tp TopicPartition) // Called by the IncrementalGroupRebalancer. 
Signals the instruction handler that it is safe to forget this previously prepped TopicPartition.
	ForgetPreparedTopicPartition(tp TopicPartition)
	//
	// Called by the IncrementalGroupRebalancer. A valid *kgo.Client, which is on the same cluster as the Source.Topic, must be returned.
	Client() *kgo.Client
}

// Creates an IncrementalRebalancer suitable for use by the kgo Kafka driver. In most cases, the instructionHandler is the EventSource.
// `activeTransitions` defines how many partitions may be in receivership at any given point in time.
//
// For example, when `activeTransitions` is 1 and the group state is imbalanced
// (a new member is added or a member signals it wishes to leave the group), the IncrementalGroupRebalancer will choose 1 partition to move. Once the receiver
// of that partition signals it is ready for the partition, it will assign it, then choose another partition to move. This process continues until the group has reached a
// balanced state.
//
// In all cases, any unassigned partitions will be assigned immediately.
// If a consumer host crashes, for example, its partitions will be assigned immediately, regardless of preparation state.
//
// receivership - the state of being dealt with by an official receiver.
func IncrementalRebalancer(instructionHandler IncrRebalanceInstructionHandler) IncrementalGroupRebalancer {
	return &incrementalRebalancer{
		// if we want to increase the number of max in-transition partitions, we need to do some more work.
		// though there will never be an occasion where partitions are left unassigned, they may get assigned to a
		// consumer that is not prepared for them. once this is debugged, accept `maxInTransition int` as a
		// constructor argument.
		balancerController: incrementalBalanceController{
			budget:             1,
			instructionHandler: instructionHandler,
		},
		quitChan:           make(chan struct{}, 1),
		gracefulChan:       make(chan struct{}),
		memberStatus:       ActiveMember,
		instructionHandler: instructionHandler,
		preparing:          NewTopicPartitionSet(),
		ready:              NewTopicPartitionSet(),
	}
}

// PartitionPrepared must be called once preparations for a given TopicPartition are complete.
// In the case of the EventSource, it calls this method once it has finished populating the StateStore for the TopicPartition.
func (ir *incrementalRebalancer) PartitionPrepared(tp TopicPartition) {
	ir.statusLock.Lock()
	defer ir.statusLock.Unlock()
	ir.preparing.Remove(tp)
	ir.ready.Insert(tp)
	go ir.client().ForceRebalance()
}

// PartitionPreparationFailed must be called if the IncrRebalanceInstructionHandler was unable to prepare a partition it was previously instructed to prepare.
func (ir *incrementalRebalancer) PartitionPreparationFailed(tp TopicPartition) {
	ir.statusLock.Lock()
	defer ir.statusLock.Unlock()
	rebalance := false
	if ir.preparing.Remove(tp) {
		rebalance = true
	}
	if ir.ready.Remove(tp) {
		rebalance = true
	}
	if rebalance {
		go ir.client().ForceRebalance()
	}
}

// PartitionsAssigned must be called after a partition is assigned to the consumer. If you are using SourceConsumer, there is no need to interact with this method directly.
func (ir *incrementalRebalancer) PartitionsAssigned(tps ...TopicPartition) {
	ir.statusLock.Lock()
	defer ir.statusLock.Unlock()
	rebalance := false
	for _, tp := range tps {
		if ir.preparing.Remove(tp) {
			rebalance = true
		}
		if ir.ready.Remove(tp) {
			rebalance = true
		}
	}
	if rebalance {
		go ir.client().ForceRebalance()
	}
}

func (ir *incrementalRebalancer) client() *kgo.Client {
	return ir.instructionHandler.Client()
}

// Signals the group that this member wishes to leave.
// Returns a channel which blocks until all partitions for this member have been reassigned.
func (ir *incrementalRebalancer) GracefullyLeaveGroup() <-chan struct{} {
	ir.statusLock.Lock()
	defer ir.statusLock.Unlock()
	if ir.memberStatus == ActiveMember {
		ir.memberStatus = InactiveMember
		ir.leaveTime = time.Now().UnixMilli()
		time.AfterFunc(time.Second, ir.client().ForceRebalance)
	}
	return ir.gracefulChan
}

// Needed to fulfill the kgo.GroupBalancer interface. There should be no need to interact with this directly.
func (ir *incrementalRebalancer) IsCooperative() bool { return true }

// Needed to fulfill the kgo.GroupBalancer interface. There should be no need to interact with this directly.
func (ir *incrementalRebalancer) ProtocolName() string { return IncrementalCoopProtocol }

// Needed to fulfill the kgo.GroupBalancer interface. There should be no need to interact with this directly,
// though we are hijacking this method to parse balance instructions like 'Prepare' and 'Forget'.
func (ir *incrementalRebalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) {
	cma := new(kmsg.ConsumerMemberAssignment)
	err := cma.ReadFrom(assignment)
	if err != nil {
		return nil, err
	}
	var instructions IncrGroupMemberInstructions
	json.Unmarshal(cma.UserData, &instructions)
	parsed := make(map[string][]int32, len(cma.Topics))
	for _, topic := range cma.Topics {
		parsed[topic.Topic] = topic.Partitions
	}
	ir.statusLock.Lock()
	defer ir.statusLock.Unlock()
	if ir.memberStatus == InactiveMember {
		shouldClose := true
		for _, assignments := range parsed {
			if len(assignments) > 0 {
				shouldClose = false
				break
			}
		}
		if shouldClose {
			ir.memberStatus = Defunct
			close(ir.gracefulChan)
			// go ir.client().ForceRebalance()
			return parsed, nil
		}
	}
	for _, tp := range instructions.Prepare {
		if ir.preparing.Insert(tp) {
			go ir.instructionHandler.PrepareTopicPartition(tp)
		}
	}
	if len(instructions.Forget) > 0 {
		// if we were instructed to forget a partition, we will need to force a rebalance,
		// otherwise the group balancer could become stuck if we were in the middle of transferring a partition
		wg := &sync.WaitGroup{}
		wg.Add(len(instructions.Forget))
		for _, tp := range instructions.Forget {
			prepping := ir.preparing.Remove(tp)
			prepped := ir.ready.Remove(tp)
			if prepping || prepped {
				// pass tp explicitly so the goroutine does not capture the reused loop variable
				go func(tp TopicPartition) {
					ir.instructionHandler.ForgetPreparedTopicPartition(tp)
					wg.Done()
				}(tp)
			} else {
				wg.Done()
			}
		}
		wg.Wait()
		ir.client().ForceRebalance()
	}
	return parsed, err
}

// Needed to fulfill the kgo.GroupBalancer interface. There should be no need to interact with this directly.
func (ir *incrementalRebalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (kgo.GroupMemberBalancer, map[string]struct{}, error) {
	cb, err := kgo.NewConsumerBalancer(ir.balancerController, members)
	return balanceWrapper{consumerBalancer: cb}, cb.MemberTopics(), err
}

// Needed to fulfill the kgo.GroupBalancer interface. There should be no need to interact with this directly.
// We'll use the same metadata format as kgo itself (using copy-and-paste technology) and use the supplied UserData field to provide IncrementalRebalancer
// specific data. This should allow us to be compatible with the coop_sticky balancer that is already supplied by kgo.
func (ir *incrementalRebalancer) JoinGroupMetadata(interests []string, currentAssignment map[string][]int32, generation int32) []byte { meta := kmsg.NewConsumerMemberMetadata() meta.Topics = interests meta.Version = 1 for topic, partitions := range currentAssignment { metaPart := kmsg.NewConsumerMemberMetadataOwnedPartition() metaPart.Topic = topic metaPart.Partitions = partitions meta.OwnedPartitions = append(meta.OwnedPartitions, metaPart) } // KAFKA-12898: ensure our topics are sorted metaOwned := meta.OwnedPartitions sort.Slice(metaOwned, func(i, j int) bool { return metaOwned[i].Topic < metaOwned[j].Topic }) meta.UserData = ir.userData() return meta.AppendTo(nil) } func (ir *incrementalRebalancer) userData() []byte { ir.statusLock.Lock() defer ir.statusLock.Unlock() data, _ := json.Marshal(IncrGroupMemberMeta{ Status: ir.memberStatus, LeftAt: ir.leaveTime, Preparing: ir.preparing.Items(), Ready: ir.ready.Items(), }) return data }
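The constructor and GracefullyLeaveGroup above are the pieces an application touches directly. The following is a hypothetical sketch only: it assumes GracefullyLeaveGroup is exposed on the returned IncrementalGroupRebalancer (the interface definition is not part of this excerpt), and minimalHandler is a stand-in for the EventSource, which normally fulfills IncrRebalanceInstructionHandler for you.

package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/aws/go-kafka-event-source/streams"
	"github.com/twmb/franz-go/pkg/kgo"
)

// minimalHandler is a hypothetical IncrRebalanceInstructionHandler.
// In a real application the EventSource plays this role.
type minimalHandler struct {
	client *kgo.Client // the consumer group client, initialized elsewhere
}

func (h *minimalHandler) PrepareTopicPartition(tp streams.TopicPartition) {
	// warm local state for tp before the group assigns it to us
}

func (h *minimalHandler) ForgetPreparedTopicPartition(tp streams.TopicPartition) {
	// release state that was warmed but will never be assigned
}

func (h *minimalHandler) Client() *kgo.Client { return h.client }

func main() {
	rb := streams.IncrementalRebalancer(&minimalHandler{})

	// ... pass rb to the consumer group configuration and run the application ...

	// On SIGTERM, ask the group to migrate our partitions away before exiting.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM)
	<-sigs
	// assumption: GracefullyLeaveGroup is part of the IncrementalGroupRebalancer interface
	<-rb.GracefullyLeaveGroup() // blocks until every partition has been reassigned
}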
466
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "math/rand" "sync" "time" ) // Container for an Interjector. type interjection[T any] struct { interjector Interjector[T] every time.Duration jitter time.Duration cancelSignal chan struct{} interjectChannel chan *interjection[T] topicPartition TopicPartition timer *time.Timer callback func() cancelled bool isOneOff bool initOnce sync.Once cancelOnce sync.Once } func (ij *interjection[T]) interject(ec *EventContext[T]) ExecutionState { return ij.interjector(ec, time.Now()) } func (ij *interjection[T]) init(tp TopicPartition, c chan *interjection[T]) { ij.initOnce.Do(func() { ij.topicPartition = tp ij.cancelSignal = make(chan struct{}, 1) ij.interjectChannel = c log.Infof("Interjection initialized for %+v", ij.topicPartition) }) } // calculates the timerDuration with jitter for the next interjection interval func (ij *interjection[T]) timerDuration() time.Duration { if ij.jitter == 0 { return ij.every } jitter := time.Duration(rand.Float64() * float64(ij.jitter)) if rand.Intn(10) < 5 { return ij.every + jitter } return ij.every - jitter } // schedules the next interjection func (ij *interjection[T]) tick() { if ij.isOneOff { return } delay := ij.timerDuration() log.Tracef("Scheduling interjection for %+v in %v", ij.topicPartition, delay) ij.timer = time.AfterFunc(delay, func() { for { select { case ij.interjectChannel <- ij: log.Tracef("Interjected into %+v", ij.topicPartition) return case <-ij.cancelSignal: return } } }) } func (ij *interjection[T]) cancel() { ij.cancelOnce.Do(func() { ij.cancelled = true ij.cancelSignal <- struct{}{} ij.timer.Stop() log.Infof("Interjection stopped for %+v", ij.topicPartition) }) }
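The interjection timer above applies symmetric jitter around the base interval so periodic interjections do not fire in lockstep across partitions. A standalone sketch of the same calculation, using an equivalent coin flip in place of rand.Intn(10) < 5:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredInterval returns every +/- a random amount up to jitter,
// mirroring the timerDuration logic shown above.
func jitteredInterval(every, jitter time.Duration) time.Duration {
	if jitter == 0 {
		return every
	}
	j := time.Duration(rand.Float64() * float64(jitter))
	if rand.Intn(2) == 0 {
		return every + j
	}
	return every - j
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitteredInterval(time.Second, 100*time.Millisecond))
	}
}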
93
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "fmt" "sync" "time" "github.com/twmb/franz-go/pkg/kgo" ) type LogLevel int const ( LogLevelNone LogLevel = iota LogLevelTrace LogLevelDebug LogLevelInfo LogLevelWarn LogLevelError ) // Translate to LogLevel to kgo.LogLevel func toKgoLoglevel(level LogLevel) kgo.LogLevel { switch level { // kgo does not define Trace, let's just say Trace == Debug case LogLevelTrace, LogLevelDebug: return kgo.LogLevelDebug case LogLevelInfo: return kgo.LogLevelInfo case LogLevelWarn: return kgo.LogLevelWarn case LogLevelError: return kgo.LogLevelError } return kgo.LogLevelNone } /* Provides the interface needed by GKES to intergrate with your loggin mechanism. Example: import ( "mylogger" "github.com/aws/go-kafka-event-source/streams" ) func main() { // GKES will emit log at whatever level is defined by NewLogger() // kgo will emit logs at LogLevelError streams.InitLogger(mylogger.NewLogger(), streams.LogLevelError) } */ type Logger interface { Tracef(msg string, args ...any) Debugf(msg string, args ...any) Infof(msg string, args ...any) Warnf(msg string, args ...any) Errorf(msg string, args ...any) } // SimpleLogger implements Logger and writes to STDOUT. Good for development purposes. type SimpleLogger LogLevel type lazyTimeStampStringer struct{} func (lazyTimeStampStringer) String() string { return time.Now().UTC().Format(time.RFC3339Nano) } var lazyTimeStamp = lazyTimeStampStringer{} func (sl SimpleLogger) Tracef(msg string, args ...any) { if LogLevelTrace >= LogLevel(sl) && LogLevel(sl) != LogLevelNone { fmt.Println(lazyTimeStamp, "[TRACE] -", fmt.Sprintf(msg, args...)) } } func (sl SimpleLogger) Debugf(msg string, args ...any) { if LogLevelDebug >= LogLevel(sl) && LogLevel(sl) != LogLevelNone { fmt.Println(lazyTimeStamp, "[DEBUG] -", fmt.Sprintf(msg, args...)) } } func (sl SimpleLogger) Infof(msg string, args ...any) { if LogLevelInfo >= LogLevel(sl) && LogLevel(sl) != LogLevelNone { fmt.Println(lazyTimeStamp, "[INFO] -", fmt.Sprintf(msg, args...)) } } func (sl SimpleLogger) Warnf(msg string, args ...any) { if LogLevelWarn >= LogLevel(sl) && LogLevel(sl) != LogLevelNone { fmt.Println(lazyTimeStamp, "[WARN] -", fmt.Sprintf(msg, args...)) } } func (sl SimpleLogger) Errorf(msg string, args ...any) { if LogLevelError >= LogLevel(sl) && LogLevel(sl) != LogLevelNone { fmt.Println(lazyTimeStamp, "[ERROR] -", fmt.Sprintf(msg, args...)) } } // logWrapper allows you to utilize your own logger, but with a specific logging level for streams. type logWrapper struct { level LogLevel logger Logger } /* WrapLogger allows GKES to emit logs at a higher level than your own Logger. Useful if you need debug level logging for your own application, but want to cluuter your logs with gstream output. 
Example: import ( "mylogger" "github.com/aws/go-kafka-event-source/streams" ) func main() { // your application will emit logs at "Debug" // GKES will emit logs at LogLevelError // kgo will emit logs at LogLevelNone gkesLogger := streams.WrapLogger(mylogger.NewLogger("Debug"), streams.LogLevelError) streams.InitLogger(gkesLogger, streams.LogLevelNone) } */ func WrapLogger(logger Logger, level LogLevel) Logger { return logWrapper{ level: level, logger: logger, } } func (lw logWrapper) Tracef(msg string, args ...any) { if LogLevelTrace >= lw.level && lw.level != LogLevelNone { lw.logger.Tracef(msg, args...) } } func (lw logWrapper) Debugf(msg string, args ...any) { if LogLevelDebug >= lw.level && lw.level != LogLevelNone { lw.logger.Debugf(msg, args...) } } func (lw logWrapper) Infof(msg string, args ...any) { if LogLevelInfo >= lw.level && lw.level != LogLevelNone { lw.logger.Infof(msg, args...) } } func (lw logWrapper) Warnf(msg string, args ...any) { if LogLevelWarn >= lw.level && lw.level != LogLevelNone { lw.logger.Warnf(msg, args...) } } func (lw logWrapper) Errorf(msg string, args ...any) { if LogLevelError >= lw.level && lw.level != LogLevelNone { lw.logger.Errorf(msg, args...) } } var log Logger = SimpleLogger(LogLevelError) var kgoLogger kgo.Logger = kgoLogWrapper(kgo.LogLevelError) type kgoLogWrapper kgo.LogLevel func (klw kgoLogWrapper) Level() kgo.LogLevel { return kgo.LogLevel(klw) } func (klw kgoLogWrapper) Log(level kgo.LogLevel, msg string, keyvals ...interface{}) { switch level { case kgo.LogLevelDebug: log.Debugf(msg, keyvals...) case kgo.LogLevelInfo: log.Infof(msg, keyvals...) case kgo.LogLevelWarn: log.Warnf(msg, keyvals...) case kgo.LogLevelError: log.Errorf(msg, keyvals...) } } var oneLogger = sync.Once{} /* Initializes the GKES logger. `kafkaDriverLogLevel` defines the log level for the underlying kgo clients. This call should be the first interaction with the GKES module. Subsequent calls will have no effect. If never called, the default unitialized logger writes to STDOUT at LogLevelError for both GKES and kgo. Example: import "github.com/aws/go-kafka-event-source/streams" func main() { streams.InitLogger(streams.SimpleLogger(streams.LogLevelInfo), streams.LogLevelError) // ... initialize your application } */ func InitLogger(l Logger, kafkaDriverLogLevel LogLevel) Logger { oneLogger.Do(func() { log = l kgoLogger = kgoLogWrapper(toKgoLoglevel(kafkaDriverLogLevel)) }) return log }
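The Logger interface above is five leveled printf-style methods, so adapting an existing logger is mostly boilerplate. A minimal sketch that routes GKES output through the standard library log package; the adapter type (stdLogger) is hypothetical:

package main

import (
	stdlog "log"

	"github.com/aws/go-kafka-event-source/streams"
)

// stdLogger adapts the standard library logger to the streams.Logger interface.
type stdLogger struct{}

func (stdLogger) Tracef(msg string, args ...any) { stdlog.Printf("[TRACE] "+msg, args...) }
func (stdLogger) Debugf(msg string, args ...any) { stdlog.Printf("[DEBUG] "+msg, args...) }
func (stdLogger) Infof(msg string, args ...any)  { stdlog.Printf("[INFO] "+msg, args...) }
func (stdLogger) Warnf(msg string, args ...any)  { stdlog.Printf("[WARN] "+msg, args...) }
func (stdLogger) Errorf(msg string, args ...any) { stdlog.Printf("[ERROR] "+msg, args...) }

func main() {
	// Emit GKES logs at Info and above; the kgo driver logs at Error only.
	streams.InitLogger(streams.WrapLogger(stdLogger{}, streams.LogLevelInfo), streams.LogLevelError)
}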
219
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "flag" "fmt" "os" "os/exec" "testing" "time" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/google/btree" "github.com/google/uuid" ) const kafkaProgramScript = "kafka_local/exec-kafka-script.sh" const kafkaCleanupScript = "kafka_local/cleanup.sh" const kafkaDownloadScript = "kafka_local/download-kafka.sh" const kafkaWorkingDir = "kafka_local/kafka" func TestMain(m *testing.M) { flag.Parse() InitLogger(SimpleLogger(LogLevelError), LogLevelError) if testing.Short() { os.Exit(m.Run()) return } // cleanup data logs in case we exited abnormally if err := exec.Command("sh", kafkaCleanupScript).Run(); err != nil { fmt.Println(err) } // download binaly distribution of kafka if necessary if err := exec.Command("sh", kafkaDownloadScript, kafkaWorkingDir).Run(); err != nil { fmt.Println(err) } // start zookeeper and broker asynchronously zookeeper := kafkaScriptCommand("zookeeper", "start") kafka := kafkaScriptCommand("kafka", "start") if err := zookeeper.Start(); err != nil { fmt.Println("zookeeper: ", err) } if err := kafka.Start(); err != nil { fmt.Println("broker: ", err) } time.Sleep(5 * time.Second) // give some time for Kafak to warm up // run our tests code := m.Run() // stop zookeeper and broker if err := kafkaScriptCommand("zookeeper", "stop").Run(); err != nil { fmt.Println("zookeeper: ", err) } if err := kafkaScriptCommand("kafka", "stop").Run(); err != nil { fmt.Println("kafka: ", err) } // give it a second then clean up data logs time.Sleep(time.Second) if err := exec.Command("sh", kafkaCleanupScript).Run(); err != nil { fmt.Println(err) } os.Exit(code) } func kafkaScriptCommand(program, command string) *exec.Cmd { return exec.Command("sh", kafkaProgramScript, kafkaWorkingDir, program, command) } type mockAsyncCompleter struct { expectedState ExecutionState done chan struct{} t *testing.T } func (m mockAsyncCompleter) AsyncComplete(job AsyncJob[intStore]) { if state := job.Finalize(); state != m.expectedState { m.t.Errorf("incorrect ExecutionState. 
actual %v, expected: %v", state, m.expectedState) } if m.done != nil { m.done <- struct{}{} } } type intStoreItem struct { Key, Value int } func (isi intStoreItem) encodeKey(cle ChangeLogEntry) { IntCodec.Encode(cle.KeyWriter(), isi.Key) } func (isi intStoreItem) encodeValue(cle ChangeLogEntry) { IntCodec.Encode(cle.ValueWriter(), isi.Key) } func intStoreItemLess(a, b intStoreItem) bool { return a.Key < b.Key } type intStore struct { tree *btree.BTreeG[intStoreItem] } func decodeIntStoreItem(r IncomingRecord) (item intStoreItem, err error) { item.Key = sak.Must(IntCodec.Decode(r.Key())) if len(r.Value()) > 0 { item.Value = sak.Must(IntCodec.Decode(r.Value())) } return } func NewIntStore(TopicPartition) intStore { return intStore{btree.NewG(64, intStoreItemLess)} } func (s *intStore) decodeRecord(r IncomingRecord) (item intStoreItem, ok bool) { item.Key = sak.Must(IntCodec.Decode(r.Key())) if len(r.Value()) > 0 { item.Value = sak.Must(IntCodec.Decode(r.Value())) ok = true } return } func (s intStore) add(item intStoreItem) { s.tree.ReplaceOrInsert(item) } func (s intStore) del(item intStoreItem) { s.tree.Delete(item) } func (s intStore) ReceiveChange(r IncomingRecord) error { if item, ok := s.decodeRecord(r); ok { s.add(item) } else { s.del(item) } return nil } func (s intStore) Revoked() { s.tree.Clear(false) } var testCluster = SimpleCluster([]string{"127.0.0.1:9092"}) func testTopicConfig() EventSourceConfig { topicName := uuid.NewString() return EventSourceConfig{ GroupId: topicName + "_group", Topic: topicName, StateStoreTopic: topicName + "_store", NumPartitions: 10, ReplicationFactor: 1, MinInSync: 1, SourceCluster: testCluster, } } func defaultTestHandler(ec *EventContext[intStore], ir IncomingRecord) ExecutionState { s := ec.Store() cle := NewChangeLogEntry().WithEntryType("defaultHandler") if item, ok := s.decodeRecord(ir); ok { s.add(item) item.encodeKey(cle) item.encodeValue(cle) } else { item.encodeKey(cle) s.del(item) } ec.RecordChange(cle) return Complete } type testProducer struct { producer *Producer } func (p testProducer) signal(t *testing.T, v string, partition int32) { p.producer.ProduceAsync(context.TODO(), NewRecord().WithRecordType("verify").WithValue([]byte(v)).WithPartition(partition), func(r *Record, err error) { if err != nil { t.Error(err) t.FailNow() } }) } func (p testProducer) produce(t *testing.T, recordType string, k, v int) { r := NewRecord(). WithRecordType(recordType). WithPartition(int32(k % 10)) IntCodec.Encode(r.KeyWriter(), k) IntCodec.Encode(r.ValueWriter(), v) p.producer.ProduceAsync(context.TODO(), r, func(r *Record, err error) { if err != nil { t.Error(err) t.FailNow() } }) } func (p testProducer) delete(t *testing.T, recordType string, k int) { r := NewRecord(). WithRecordType(recordType). 
WithPartition(int32(k % 10)) IntCodec.Encode(r.KeyWriter(), k) p.producer.ProduceAsync(context.TODO(), r, func(r *Record, err error) { if err != nil { t.Error(err) t.FailNow() } }) } func (p testProducer) produceMany(t *testing.T, recordType string, count int) { for i := 0; i < count; i++ { p.produce(t, recordType, i, i) } } func (p testProducer) deleteMany(t *testing.T, recordType string, count int) { for i := 0; i < count; i++ { p.delete(t, recordType, i) } } func newTestEventSource() (*EventSource[intStore], testProducer, <-chan string) { c := make(chan string) cfg := testTopicConfig() es := sak.Must(NewEventSource(cfg, NewIntStore, defaultTestHandler)) producer := NewProducer(es.source.AsDestination()) RegisterEventType(es, func(ir IncomingRecord) (string, error) { return string(ir.Value()), nil }, func(ec *EventContext[intStore], v string) ExecutionState { c <- v return Complete }, "verify") return es, testProducer{producer}, c } const defaultTestTimeout = 30 * time.Second func waitForVerificationSignal(t *testing.T, c <-chan string, timeout time.Duration) (string, bool) { if timeout == 0 { timeout = defaultTestTimeout } timer := time.NewTimer(timeout) select { case s := <-c: return s, true case <-timer.C: t.Errorf("deadline exceeded") t.FailNow() return "", false } } func (p testProducer) waitForAllPartitions(t *testing.T, c <-chan string, timeout time.Duration) bool { partitionCount := p.producer.destination.NumPartitions for partition := 0; partition < partitionCount; partition++ { p.waitForPartition(t, c, timeout, int32(partition)) } return true } func (p testProducer) waitForPartition(t *testing.T, c <-chan string, timeout time.Duration, partition int32) (string, bool) { p.signal(t, "waitForPartition", int32(partition)) return waitForVerificationSignal(t, c, timeout) }
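The harness above follows a common TestMain shape: skip the expensive external dependency under -short, otherwise start it, run the tests, and tear it down. A generic sketch of that pattern for a _test.go file; the script names are placeholders:

package integration

import (
	"flag"
	"os"
	"os/exec"
	"testing"
)

func TestMain(m *testing.M) {
	flag.Parse()
	if testing.Short() {
		os.Exit(m.Run()) // unit tests only; no external dependency needed
	}
	// start the dependency (placeholder script), run the tests, then stop it
	_ = exec.Command("sh", "start-dependency.sh").Run()
	code := m.Run()
	_ = exec.Command("sh", "stop-dependency.sh").Run()
	os.Exit(code)
}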
297
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import "time" const TxnCommitOperation = "TxnCommit" const PartitionPreppedOperation = "PartitionPrepped" type Metric struct { StartTime time.Time ExecuteTime time.Time EndTime time.Time Count int Bytes int PartitionCount int Partition int32 Operation string Topic string GroupId string } func (m Metric) Duration() time.Duration { return m.EndTime.Sub(m.StartTime) } func (m Metric) Linger() time.Duration { return m.ExecuteTime.Sub(m.StartTime) } func (m Metric) ExecuteDuration() time.Duration { return m.EndTime.Sub(m.ExecuteTime) }
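Metric is a plain value; the latency breakdown falls out of its three timestamps (Linger is time spent queued before execution, ExecuteDuration is the time spent doing the work). A small sketch that formats one. How a Metric is delivered (the MetricsHandler signature) is not shown in this excerpt, so the function below is only a plain helper with a hand-built value:

package main

import (
	"fmt"
	"time"

	"github.com/aws/go-kafka-event-source/streams"
)

// report prints the timing breakdown carried by a single Metric.
func report(m streams.Metric) {
	fmt.Printf("%s %s/%s: total=%v linger=%v execute=%v count=%d bytes=%d\n",
		m.Operation, m.Topic, m.GroupId,
		m.Duration(), m.Linger(), m.ExecuteDuration(), m.Count, m.Bytes)
}

func main() {
	now := time.Now()
	report(streams.Metric{
		Operation:   streams.TxnCommitOperation,
		Topic:       "example-topic", // placeholder names
		GroupId:     "example-group",
		StartTime:   now.Add(-15 * time.Millisecond),
		ExecuteTime: now.Add(-10 * time.Millisecond),
		EndTime:     now,
		Count:       42,
		Bytes:       4096,
	})
}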
46
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "testing" "github.com/twmb/franz-go/pkg/kgo" ) type fixedPartitoner struct { partition int32 } func (fp fixedPartitoner) ForTopic(topic string) kgo.TopicPartitioner { return fp } func (fp fixedPartitoner) Partition(r *kgo.Record, n int) int { return int(fp.partition) } func (fp fixedPartitoner) RequiresConsistency(*kgo.Record) bool { return true } func TestOptionalPartitioner(t *testing.T) { partitioner := NewOptionalPerTopicPartitioner(fixedPartitoner{10}, map[string]kgo.Partitioner{ "A": fixedPartitoner{1}, "B": fixedPartitoner{2}, }) assigned := NewRecord().WithPartition(11) unassigned := NewRecord() partitionerA := partitioner.ForTopic("A") partitionerB := partitioner.ForTopic("B") defaultPartitioner := partitioner.ForTopic("UNKNOWN") p := partitionerA.Partition(assigned.toKafkaRecord(), 100) if p != 11 { t.Errorf("Incorrect partition. actual: %d, expected: %d", p, 11) } p = partitionerA.Partition(unassigned.toKafkaRecord(), 100) if p != 1 { t.Errorf("Incorrect partition. actual: %d, expected: %d", p, 1) } p = partitionerB.Partition(unassigned.toKafkaRecord(), 100) if p != 2 { t.Errorf("Incorrect partition. actual: %d, expected: %d", p, 2) } p = defaultPartitioner.Partition(unassigned.toKafkaRecord(), 100) if p != 10 { t.Errorf("Incorrect partition. actual: %d, expected: %d", p, 10) } }
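The test above pins down the OptionalPartitioner contract: an explicitly assigned partition always wins, otherwise the per-topic (or default) partitioner decides. A sketch of wiring that into a Producer; the broker address, topic names, and choice of kgo partitioners are illustrative, and it assumes user-supplied options override the NewProducer defaults listed in its doc comment:

package main

import (
	"context"

	"github.com/aws/go-kafka-event-source/streams"
	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	partitioner := streams.NewOptionalPerTopicPartitioner(
		kgo.StickyKeyPartitioner(nil), // default for topics not listed below
		map[string]kgo.Partitioner{"A": kgo.RoundRobinPartitioner()},
	)
	dest := streams.Destination{
		DefaultTopic: "A",
		Cluster:      streams.SimpleCluster([]string{"127.0.0.1:9092"}),
	}
	producer := streams.NewProducer(dest, kgo.RecordPartitioner(partitioner))
	defer producer.Close()

	// an explicit partition wins over the configured partitioner
	rec := streams.NewRecord().WithRecordType("example").WithPartition(3).WithValue([]byte("hello"))
	_ = producer.Produce(context.Background(), rec)
}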
72
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "sync" "github.com/twmb/franz-go/pkg/kgo" ) type changeLogData[T any] struct { store T topic string } type changeLogPartition[T StateStore] changeLogData[T] func (sp changeLogPartition[T]) grab() T { return sp.store } // we just need to untype this generic constraint from `StateStore“ to `any“ // to ease up some type gymnastics downstream func (sp changeLogPartition[T]) changeLogData() changeLogData[T] { return changeLogData[T](sp) } // no-op for now. We may need some locking in the future if we do local state store txns. func (sp changeLogPartition[T]) release() {} func (sp changeLogPartition[T]) receiveChangeInternal(record *kgo.Record) error { // this is only called during partition prep, so locking is not necessary // this will improve performance a bit err := sp.store.ReceiveChange(newIncomingRecord(record)) if err != nil { log.Errorf("Error receiving change on topic: %s, partition: %d, offset: %d, err: %v", record.Topic, record.Partition, record.Offset, err) } return err } func (sp changeLogPartition[T]) revokedInternal() { sp.grab().Revoked() sp.release() } type partitionedChangeLog[T StateStore] struct { data map[int32]changeLogPartition[T] factory TopicPartitionCallback[T] changeLogTopic string mux sync.Mutex } func newPartitionedChangeLog[T StateStore](factory TopicPartitionCallback[T], changeLogTopic string) *partitionedChangeLog[T] { return &partitionedChangeLog[T]{ changeLogTopic: changeLogTopic, data: make(map[int32]changeLogPartition[T]), factory: factory} } func (ps *partitionedChangeLog[T]) Len() int { return len(ps.data) } func (ps *partitionedChangeLog[T]) assign(partition int32) changeLogPartition[T] { ps.mux.Lock() defer ps.mux.Unlock() var ok bool var sp changeLogPartition[T] log.Debugf("PartitionedStore assigning %d", partition) if sp, ok = ps.data[partition]; !ok { sp = changeLogPartition[T]{ store: ps.factory(ntp(partition, ps.changeLogTopic)), topic: ps.changeLogTopic, } ps.data[partition] = sp } return sp } func (ps *partitionedChangeLog[T]) revoke(partition int32) { ps.mux.Lock() defer ps.mux.Unlock() log.Debugf("PartitionedStore revoking %d", partition) if store, ok := ps.data[partition]; ok { delete(ps.data, partition) store.revokedInternal() } }
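changeLogPartition above only ever invokes two behaviors on the underlying store: ReceiveChange while applying change-log records and Revoked when the partition moves away. The full StateStore interface is not reproduced in this excerpt, so the sketch below assumes those are the required methods; mapStore and the record contents are hypothetical.

package main

import "github.com/aws/go-kafka-event-source/streams"

// mapStore is a minimal in-memory state store keyed by the raw record key.
type mapStore struct {
	data map[string][]byte
}

func newMapStore(streams.TopicPartition) *mapStore {
	return &mapStore{data: make(map[string][]byte)}
}

// ReceiveChange applies one change-log record; an empty value is treated as a deletion.
func (s *mapStore) ReceiveChange(r streams.IncomingRecord) error {
	if len(r.Value()) == 0 {
		delete(s.data, string(r.Key()))
	} else {
		s.data[string(r.Key())] = r.Value()
	}
	return nil
}

// Revoked is called when the partition is reassigned elsewhere; drop the local copy.
func (s *mapStore) Revoked() {
	s.data = nil
}

func main() {
	s := newMapStore(streams.TopicPartition{})
	rec := streams.NewRecord().WithKeyString("user-1").WithValue([]byte(`{"name":"ada"}`)).AsIncomingRecord()
	_ = s.ReceiveChange(rec)
}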
102
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "fmt" "sync" "sync/atomic" "time" "github.com/aws/go-kafka-event-source/streams/sak" "github.com/twmb/franz-go/pkg/kgo" ) // Returned by an EventProcessor or Interjector in response to an EventContext. ExecutionState // should not be conflated with concepts of error state, such as Success or Failure. type ExecutionState int const ( // Complete signals the EventSource that the event or interjection is completely processed. // Once Complete is returned, the offset for the associated EventContext will be commited. Complete ExecutionState = 0 // Incomplete signals the EventSource that the event or interjection is still ongoing, and // that your application promises to fulfill the EventContext in the future. // The offset for the associated EventContext will not be commited. Incomplete ExecutionState = 1 Fatal ExecutionState = 2 unknownType ExecutionState = 3 ) type AsyncJob[T any] struct { ctx *EventContext[T] finalizer func() ExecutionState } func (aj AsyncJob[T]) Finalize() ExecutionState { return aj.finalizer() } type asyncCompleter[T any] struct { asyncJobs chan AsyncJob[T] } func (ac asyncCompleter[T]) AsyncComplete(j AsyncJob[T]) { ac.asyncJobs <- j } type partitionWorker[T StateStore] struct { eosProducer *eosProducerPool[T] partitionInput chan []*kgo.Record maxPending chan struct{} interjectionInput chan *interjection[T] eventInput chan *EventContext[T] interjectionEventInput chan *EventContext[T] asyncCompleter asyncCompleter[T] stopSignal chan struct{} revokedSignal chan struct{} stopped chan struct{} changeLog changeLogPartition[T] eventSource *EventSource[T] runStatus sak.RunStatus ready int64 highestOffset int64 topicPartition TopicPartition revocationWaiter sync.WaitGroup } func newPartitionWorker[T StateStore]( eventSource *EventSource[T], topicPartition TopicPartition, commitLog *eosCommitLog, changeLog changeLogPartition[T], eosProducer *eosProducerPool[T], waiter func()) *partitionWorker[T] { eosConfig := eventSource.source.config.EosConfig recordsInputSize := sak.Max(eosConfig.MaxBatchSize/10, 100) asyncSize := recordsInputSize * 4 pw := &partitionWorker[T]{ eventSource: eventSource, topicPartition: topicPartition, changeLog: changeLog, eosProducer: eosProducer, stopSignal: make(chan struct{}), revokedSignal: make(chan struct{}, 1), stopped: make(chan struct{}), maxPending: make(chan struct{}, eosProducer.maxPendingItems()), asyncCompleter: asyncCompleter[T]{ asyncJobs: make(chan AsyncJob[T], asyncSize), }, partitionInput: make(chan []*kgo.Record, 4), eventInput: make(chan *EventContext[T], recordsInputSize), interjectionInput: make(chan *interjection[T], 1), interjectionEventInput: make(chan *EventContext[T], 1), runStatus: eventSource.runStatus.Fork(), highestOffset: -1, } go pw.work(pw.eventSource.interjections, waiter, commitLog) return pw } func (pw *partitionWorker[T]) canInterject() bool { return atomic.LoadInt64(&pw.ready) != 
0 } func (pw *partitionWorker[T]) add(records []*kgo.Record) { if pw.isRevoked() { return } // atomic.AddInt64(&pw.pending, int64(len(records))) pw.partitionInput <- records } func (pw *partitionWorker[T]) revoke() { pw.runStatus.Halt() } type sincer struct { then time.Time } func (s sincer) String() string { return fmt.Sprintf("%v", time.Since(s.then)) } func (pw *partitionWorker[T]) pushRecords() { for { select { case records := <-pw.partitionInput: if !pw.isRevoked() { pw.scheduleTxnAndExecution(records) } case ij := <-pw.interjectionInput: pw.scheduleInterjection(ij) case <-pw.runStatus.Done(): log.Debugf("Closing worker for %+v", pw.topicPartition) pw.stopSignal <- struct{}{} <-pw.stopped close(pw.partitionInput) close(pw.eventInput) close(pw.asyncCompleter.asyncJobs) log.Debugf("Closed worker for %+v", pw.topicPartition) return } } } func (pw *partitionWorker[T]) scheduleTxnAndExecution(records []*kgo.Record) { if pw.isRevoked() { return } pw.revocationWaiter.Add(len(records)) // optimistically do one add call for _, record := range records { if record != nil && record.Offset >= pw.highestOffset { ec := newEventContext(pw.runStatus.Ctx(), record, pw.changeLog.changeLogData(), pw) pw.maxPending <- struct{}{} pw.eosProducer.addEventContext(ec) pw.eventInput <- ec } else { pw.revocationWaiter.Done() // in the rare occasion this is a stale evetn, decrement the revocation waiter } // this is needed as, when under load, the record input may starve out interjections // which have a very small input buffer pw.interleaveInterjection() } } func (pw *partitionWorker[T]) interleaveInterjection() { select { case ij := <-pw.interjectionInput: pw.scheduleInterjection(ij) default: } } func (pw *partitionWorker[T]) scheduleInterjection(inter *interjection[T]) { if pw.isRevoked() { if inter.callback != nil { inter.callback() } return } pw.revocationWaiter.Add(1) ec := newInterjectionContext(pw.runStatus.Ctx(), inter, pw.topicPartition, pw.changeLog.changeLogData(), pw) pw.maxPending <- struct{}{} pw.eosProducer.addEventContext(ec) pw.interjectionEventInput <- ec } func (pw *partitionWorker[T]) work(interjections []interjection[T], waiter func(), commitLog *eosCommitLog) { elapsed := sincer{time.Now()} // the partition is not ready to receive events as it is still bootstrapping the state store. // in the case where this partition was assigned due to a failure on another consumer, this could be a lengthy process // if we continue to consume events for this partition, we will fill it's input buffer // and block other partitions on this consumer. 
pause the partition until tghe state store is bootstrapped pw.eventSource.consumer.Client().PauseFetchPartitions(map[string][]int32{ pw.topicPartition.Topic: {pw.topicPartition.Partition}, }) // don't start consuming until this function returns // this function will block until all changelogs for this partition are populated pw.highestOffset = commitLog.lastProcessed(pw.topicPartition) log.Debugf("partitionWorker initialized %+v with lastProcessed offset: %d in %v", pw.topicPartition, pw.highestOffset, elapsed) waiter() pw.eventSource.consumer.Client().ResumeFetchPartitions(map[string][]int32{ pw.topicPartition.Topic: {pw.topicPartition.Partition}, }) // resume partition if it was paused go pw.pushRecords() atomic.StoreInt64(&pw.ready, 1) log.Debugf("partitionWorker activated %+v in %v, interjectionCount: %d", pw.topicPartition, elapsed, len(interjections)) ijPtrs := sak.ToPtrSlice(interjections) for _, ij := range ijPtrs { ij.init(pw.topicPartition, pw.interjectionInput) ij.tick() } pw.eventSource.source.onPartitionActivated(pw.topicPartition.Partition) for { select { case ec := <-pw.eventInput: pw.handleEvent(ec) case ec := <-pw.interjectionEventInput: pw.handleInterjection(ec) case job := <-pw.asyncCompleter.asyncJobs: pw.processAsyncJob(job) case <-pw.stopSignal: for _, ij := range ijPtrs { ij.cancel() } go pw.waitForRevocation() case <-pw.revokedSignal: pw.stopped <- struct{}{} return } } } func (pw *partitionWorker[T]) waitForRevocation() { pw.revocationWaiter.Wait() // wait until all pending events have been accpted by a producerNode pw.revokedSignal <- struct{}{} } func (pw *partitionWorker[T]) processAsyncJob(job AsyncJob[T]) { if job.Finalize() == Complete { job.ctx.complete() <-pw.maxPending } } func (pw *partitionWorker[T]) isRevoked() bool { return !pw.runStatus.Running() } func (pw *partitionWorker[T]) handleInterjection(ec *EventContext[T]) { inter := ec.interjection pw.assignProducer(ec) if ec.producer == nil { <-pw.maxPending if inter.callback != nil { inter.callback() // we need to close off 1-off interjections to prevent sourceConsumer from hanging } } else if ec.producer != nil && inter.interject(ec) == Complete { ec.complete() <-pw.maxPending inter.tick() } } func (pw *partitionWorker[T]) handleEvent(ec *EventContext[T]) bool { pw.forwardToEventSource(ec) return true } func (pw *partitionWorker[T]) assignProducer(ec *EventContext[T]) { // if we stop processing async completions while waiting for a producer // we could eventually dealock with the eos producer // if nothing is yet available, go ahead and process an asyncJob for { select { case ec.producer = <-ec.producerChan: return case job := <-pw.asyncCompleter.asyncJobs: pw.processAsyncJob(job) } } } func (pw *partitionWorker[T]) forwardToEventSource(ec *EventContext[T]) { pw.assignProducer(ec) if ec.producer == nil { // if we're revoked, don't even add this to the onDeck producer <-pw.maxPending return } offset := ec.Offset() pw.highestOffset = offset + 1 record, _ := ec.Input() if pw.eventSource.handleEvent(ec, record) == Complete { ec.complete() <-pw.maxPending } }
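partitionWorker bounds in-flight work with maxPending, a buffered channel used as a counting semaphore: scheduling an event sends a token, completing it receives one back. A standalone sketch of that back-pressure technique using only the standard library:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxPending = 4
	pending := make(chan struct{}, maxPending) // acquire by send, release by receive
	var wg sync.WaitGroup

	for i := 0; i < 16; i++ {
		pending <- struct{}{} // blocks once maxPending events are outstanding
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Println("processing event", n)
			<-pending // completion frees a slot, admitting the next event
		}(i)
	}
	wg.Wait()
}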
318
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "sync" "sync/atomic" "github.com/twmb/franz-go/pkg/kgo" ) type Destination struct { // The topic to use for records being produced which have empty topic data DefaultTopic string // Optional, used in CreateDestination call. NumPartitions int // Optional, used in CreateDestination call. ReplicationFactor int // Optional, used in CreateDestination call. MinInSync int // The Kafka cluster where this destination resides. Cluster Cluster } // A simple kafka producer type Producer struct { client *kgo.Client destination Destination } // Create a new Producer. Destination provides cluster connect information. // Defaults options are: kgo.ProducerLinger(5 * time.Millisecond) and // kgo.RecordPartitioner(NewOptionalPartitioner(kgo.StickyKeyPartitioner(nil))) func NewProducer(destination Destination, opts ...kgo.Opt) *Producer { client, err := NewClient(destination.Cluster, opts...) if err != nil { panic(err) } p := &Producer{ client: client, destination: destination, } return p } // Produces a record, blocking until complete. // If the record has not topic, the DefaultTopic of the producer's Destination will be used. func (p *Producer) Produce(ctx context.Context, record *Record) (err error) { wg := &sync.WaitGroup{} wg.Add(1) p.ProduceAsync(ctx, record, func(_ *Record, kErr error) { err = kErr wg.Done() }) wg.Wait() return } // Produces a record asynchronously. If callback is non-nill, it will be executed `callback` when the call is complete. // If the record has not topic, the DefaultTopic of the producer's Destination will be used. func (p *Producer) ProduceAsync(ctx context.Context, record *Record, callback func(*Record, error)) { if len(record.kRecord.Topic) == 0 { record = record.WithTopic(p.destination.DefaultTopic) } p.client.Produce(ctx, record.toKafkaRecord(), func(r *kgo.Record, kErr error) { if callback != nil { callback(record, kErr) } }) } func (p *Producer) Close() { p.client.Close() } type BatchProducer[S any] struct { client *kgo.Client destination Destination } // Provides similar functionality to [AsyncBatcher], but in the context of producing Kafka records. // Since the underlying Kafka producer already batches in an ordered fashion, there is no need to add the overhead of the [AsyncBatcher]. // Records produced by a BatchProducer are not transactional, and therefore duplicates could be created. 
// The use cases for the BatchProducer vs EventContext.Forward are as follows: // // - The topic you are producing to is not on the same Kafka cluster as your EventSource // // - Duplicates are OK and you do not want to wait for the transaction to complete before the consumers of these records can see the data (lower latency) // // If your use case does not fall into the above buckets, it is recommended to just use [EventConetxt.Forward] func NewBatchProducer[S any](destination Destination, opts ...kgo.Opt) *BatchProducer[S] { client, err := NewClient(destination.Cluster, opts...) if err != nil { panic(err) } p := &BatchProducer[S]{ client: client, destination: destination, } return p } type produceBatcher[S any] struct { ctx *EventContext[S] records []*Record pending int64 callback BatchProducerCallback[S] userData any } func (b *produceBatcher[S]) Key() TopicPartition { return b.ctx.TopicPartition() } func (b *produceBatcher[S]) cleanup() { for _, r := range b.records { r.Release() } b.records = nil } func (b *produceBatcher[S]) recordComplete() { if atomic.AddInt64(&b.pending, -1) == 0 && b.callback != nil { b.ctx.AsyncJobComplete(b.executeCallback) } } func (b *produceBatcher[S]) executeCallback() ExecutionState { state := b.callback(b.ctx, b.records, b.userData) b.cleanup() return state } // Produces `records` and invokes BatchProducerCallback once all records have been produced or have errored out. // If there was an error in producing, it can be retrieved with record.Error() // // It is important to note that GKES uses a Record pool. After the transaction has completed for this record, it is returned to the pool for reuse. // Your application should not hold on to references to the Record(s) after BatchProducerCallback has been invoked. func (p *BatchProducer[S]) Produce(ec *EventContext[S], records []*Record, cb BatchProducerCallback[S], userData any) ExecutionState { b := &produceBatcher[S]{ ctx: ec, records: records, callback: cb, pending: int64(len(records)), userData: userData, } p.produceBatch(b) return Incomplete } func (p *BatchProducer[S]) produceBatch(b *produceBatcher[S]) { for _, record := range b.records { if len(record.kRecord.Topic) == 0 { record = record.WithTopic(p.destination.DefaultTopic) } p.produceRecord(b, record) } } func (p *BatchProducer[S]) produceRecord(b *produceBatcher[S], record *Record) { p.client.Produce(context.TODO(), record.toKafkaRecord(), func(kr *kgo.Record, err error) { record.err = err b.recordComplete() }) }
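A minimal usage sketch for the Producer above, covering both the blocking and the callback-based paths; the broker address, topic, and payloads are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/aws/go-kafka-event-source/streams"
)

func main() {
	dest := streams.Destination{
		DefaultTopic: "orders", // used when a record has no topic set
		Cluster:      streams.SimpleCluster([]string{"127.0.0.1:9092"}),
	}
	producer := streams.NewProducer(dest)
	defer producer.Close()

	// Blocking produce.
	rec := streams.NewRecord().WithKeyString("order-1").WithValue([]byte(`{"qty":1}`))
	if err := producer.Produce(context.Background(), rec); err != nil {
		fmt.Println("produce failed:", err)
	}

	// Asynchronous produce with a completion callback.
	async := streams.NewRecord().WithKeyString("order-2").WithValue([]byte(`{"qty":2}`))
	producer.ProduceAsync(context.Background(), async, func(r *streams.Record, err error) {
		if err != nil {
			fmt.Println("async produce failed:", err)
		}
	})
}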
179
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "bytes" "time" "unsafe" "github.com/aws/go-kafka-event-source/streams/sak" jsoniter "github.com/json-iterator/go" "github.com/twmb/franz-go/pkg/kgo" ) var json = jsoniter.ConfigCompatibleWithStandardLibrary // The record.Header key that GKES uses to transmit type information about an IncomingRecord or a ChangeLogEntry. const RecordTypeHeaderKey = "__grt__" // let's keep it small. every byte counts const AutoAssign = int32(-1) func recordSize(r kgo.Record) int { byteCount := len(r.Key) byteCount += len(r.Value) for _, h := range r.Headers { byteCount += len(h.Key) byteCount += len(h.Value) } return byteCount } type Record struct { keyBuffer *bytes.Buffer valueBuffer *bytes.Buffer kRecord kgo.Record recordType string err error } var recordPool = sak.NewPool(30000, func() *Record { return &Record{ kRecord: kgo.Record{ Partition: AutoAssign, Key: nil, Value: nil, }, keyBuffer: bytes.NewBuffer(nil), valueBuffer: bytes.NewBuffer(nil), } }, func(r *Record) *Record { //reset the record data r.kRecord = kgo.Record{ Partition: AutoAssign, Key: nil, Value: nil, } if len(r.kRecord.Headers) > 0 { r.kRecord.Headers = r.kRecord.Headers[0:0] } r.keyBuffer.Reset() r.valueBuffer.Reset() r.recordType = "" r.err = nil return r }) func NewRecord() *Record { return recordPool.Borrow() } type IncomingRecord struct { kRecord kgo.Record recordType string } func newIncomingRecord(incoming *kgo.Record) IncomingRecord { r := IncomingRecord{ kRecord: *incoming, } for _, header := range incoming.Headers { if header.Key == RecordTypeHeaderKey { r.recordType = string(header.Value) } } return r } func (r IncomingRecord) Offset() int64 { return r.kRecord.Offset } func (r IncomingRecord) TopicPartition() TopicPartition { return ntp(r.kRecord.Partition, r.kRecord.Topic) } func (r IncomingRecord) LeaderEpoch() int32 { return r.kRecord.LeaderEpoch } func (r IncomingRecord) Timestamp() time.Time { return r.kRecord.Timestamp } func (r IncomingRecord) RecordType() string { return r.recordType } func (r IncomingRecord) Key() []byte { return r.kRecord.Key } func (r IncomingRecord) Value() []byte { return r.kRecord.Value } func (r IncomingRecord) Headers() []kgo.RecordHeader { return r.kRecord.Headers } func (r IncomingRecord) HeaderValue(name string) []byte { for _, v := range r.kRecord.Headers { if v.Key == name { return v.Value } } return nil } func (r IncomingRecord) isMarkerRecord() bool { return isMarkerRecord(&r.kRecord) } func (r *Record) Offset() int64 { return r.kRecord.Offset } func (r *Record) TopicPartition() TopicPartition { return ntp(r.kRecord.Partition, r.kRecord.Topic) } func (r *Record) WriteKey(bs ...[]byte) { for _, b := range bs { r.keyBuffer.Write(b) } } func (r *Record) WriteKeyString(ss ...string) { for _, s := range ss { r.keyBuffer.WriteString(s) } } func (r *Record) KeyWriter() *bytes.Buffer { return r.keyBuffer } func (r *Record) WriteValue(bs ...[]byte) { for _, 
b := range bs { r.valueBuffer.Write(b) } } func (r *Record) WriteValueString(ss ...string) { for _, s := range ss { r.valueBuffer.WriteString(s) } } func (r *Record) ValueWriter() *bytes.Buffer { return r.valueBuffer } func (r *Record) WithTopic(topic string) *Record { r.kRecord.Topic = topic return r } func (r *Record) WithKey(key ...[]byte) *Record { r.WriteKey(key...) return r } func (r *Record) WithKeyString(key ...string) *Record { r.WriteKeyString(key...) return r } func (r *Record) WithValue(value ...[]byte) *Record { r.WriteValue(value...) return r } func (r *Record) WithHeader(key string, value []byte) *Record { r.kRecord.Headers = append(r.kRecord.Headers, kgo.RecordHeader{Key: key, Value: value}) return r } func (r *Record) WithRecordType(recordType string) *Record { r.recordType = recordType return r } func (r *Record) WithPartition(partition int32) *Record { r.kRecord.Partition = int32(partition) return r } func addRecordTypeHeader(recordType string, record *kgo.Record) { if len(recordType) == 0 { return } for _, header := range record.Headers { if header.Key == RecordTypeHeaderKey { return } } record.Headers = append(record.Headers, kgo.RecordHeader{ Key: RecordTypeHeaderKey, Value: []byte(recordType), }) } // used internally for producing. func (r *Record) toKafkaRecord() *kgo.Record { r.kRecord.Key = r.keyBuffer.Bytes() // an empty buffer should be a deletion // not sure if nil === empty for these purposes // so leaving nil to be sure if r.valueBuffer.Len() > 0 { r.kRecord.Value = r.valueBuffer.Bytes() } addRecordTypeHeader(r.recordType, &r.kRecord) // this record is already in the heap (it's part of the recordPool) // since we know that this pointer is guaranteed to outlive any produce calls // in the underlying kgo driver, let's prevent the compiler from escaping this // to the heap (again). this will significantly ease GC pressure // since we are producing a lot of records return (*kgo.Record)(sak.Noescape(unsafe.Pointer(&r.kRecord))) } // Creates a newly allocated kgo.Record. The Key and Value fields are freshly allocated bytes, copied from [streams.Record]. func (r *Record) ToKafkaRecord() *kgo.Record { record := new(kgo.Record) if r.keyBuffer.Len() > 0 { record.Key = append(record.Key, r.keyBuffer.Bytes()...) } if r.valueBuffer.Len() > 0 { record.Value = append(record.Value, r.valueBuffer.Bytes()...) } addRecordTypeHeader(r.recordType, &r.kRecord) return record } // A convenience function for unit testing. This method should not need to be invoked in a production code. func (r *Record) AsIncomingRecord() IncomingRecord { return IncomingRecord{ kRecord: *r.ToKafkaRecord(), recordType: r.recordType, } } func (r *Record) Error() error { return r.err } // A convenience function provided in case you are working with a raw kgo producer // and want to integrate with streams. This will ensure that the EventSource will route the record to the proper handler // without falling back to the defaultHandler func SetRecordType(r *kgo.Record, recordType string) { r.Headers = append(r.Headers, kgo.RecordHeader{ Key: RecordTypeHeaderKey, Value: []byte(recordType), }) } func (r *Record) Release() { recordPool.Release(r) } // ChangeLogEntry represents a Kafka record which wil be produced to the StateStore for your EventSource. // Note that you can not set a topic or partition on a ChangeLogEntry. These values are managed by GKES. 
type ChangeLogEntry struct { record *Record } func NewChangeLogEntry() ChangeLogEntry { return ChangeLogEntry{NewRecord()} } func (cle ChangeLogEntry) KeyWriter() *bytes.Buffer { return cle.record.KeyWriter() } func (cle ChangeLogEntry) ValueWriter() *bytes.Buffer { return cle.record.ValueWriter() } func (cle ChangeLogEntry) WriteKey(bs ...[]byte) { cle.record.WriteKey(bs...) } func (cle ChangeLogEntry) WriteKeyString(ss ...string) { cle.record.WriteKeyString(ss...) } func (cle ChangeLogEntry) WriteValue(bs ...[]byte) { cle.record.WriteValue(bs...) } func (cle ChangeLogEntry) WriteValueString(ss ...string) { cle.record.WriteValueString(ss...) } func (cle ChangeLogEntry) WithKey(key ...[]byte) ChangeLogEntry { cle.record.WriteKey(key...) return cle } func (cle ChangeLogEntry) WithKeyString(key ...string) ChangeLogEntry { cle.record.WriteKeyString(key...) return cle } func (cle ChangeLogEntry) WithValue(value ...[]byte) ChangeLogEntry { cle.record.WriteValue(value...) return cle } func (cle ChangeLogEntry) WithEntryType(entryType string) ChangeLogEntry { cle.record.recordType = entryType return cle } func (cle ChangeLogEntry) WithHeader(key string, value []byte) ChangeLogEntry { cle.record = cle.record.WithHeader(key, value) return cle } type OptionalPartitioner struct { manualPartitioner kgo.Partitioner defaultPartitioner kgo.Partitioner topicPartitioners map[string]kgo.Partitioner } type optionalTopicPartitioner struct { manualTopicPartitioner kgo.TopicPartitioner keyTopicPartitioner kgo.TopicPartitioner } // A kgo compatible partitioner which respects Record partitions that are manually assigned. // If the record partition is [AutoAssign], the provided kgo.Partitioner will be used for partition assignment. // Note: [NewRecord] will return a record with a partition of [AutoAssign]. func NewOptionalPartitioner(partitioner kgo.Partitioner) OptionalPartitioner { return NewOptionalPerTopicPartitioner(partitioner, map[string]kgo.Partitioner{}) } // A kgo compatible partitioner which respects Record partitions that are manually assigned. // Allows you to set different partitioner per topic. If a topic is encountered that has not been defined, defaultPartitioner will be used. func NewOptionalPerTopicPartitioner(defaultPartitioner kgo.Partitioner, topicPartitioners map[string]kgo.Partitioner) OptionalPartitioner { return OptionalPartitioner{ manualPartitioner: kgo.ManualPartitioner(), defaultPartitioner: defaultPartitioner, topicPartitioners: topicPartitioners, } } func (op OptionalPartitioner) ForTopic(topic string) kgo.TopicPartitioner { partitioner := op.defaultPartitioner if p, ok := op.topicPartitioners[topic]; ok { partitioner = p } return optionalTopicPartitioner{ manualTopicPartitioner: op.manualPartitioner.ForTopic(topic), keyTopicPartitioner: partitioner.ForTopic(topic), } } func (otp optionalTopicPartitioner) RequiresConsistency(_ *kgo.Record) bool { return true } func (otp optionalTopicPartitioner) Partition(r *kgo.Record, n int) int { if r.Partition == AutoAssign { return otp.keyTopicPartitioner.Partition(r, n) } return otp.manualTopicPartitioner.Partition(r, n) } // A shortcut method for createing a ChangeLogEntry with a json endcoded value. 
// // cle := CreateJsonChangeLogEntry(myValue).WithKeyString(myKey).WithEntryType(myType) // eventContext.RecordChange(cle) func CreateJsonChangeLogEntry[T any](item T) (ChangeLogEntry, error) { return CreateChangeLogEntry[T](item, JsonCodec[T]{}) } // A shortcut method for createing a ChangeLogEntry with a value endcoded using the provided codec. // // cle := CreateChangeLogEntry(myValue, myCodec).WithKeyString(myKey).WithEntryType(myType) // eventContext.RecordChange(cle) func CreateChangeLogEntry[T any](item T, codec Codec[T]) (ChangeLogEntry, error) { cle := NewChangeLogEntry() return cle, codec.Encode(cle.ValueWriter(), item) }
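A sketch of building a pooled Record through the writer API above, encoding the value straight into its buffer. JsonCodec is referenced by CreateJsonChangeLogEntry above; its Encode(writer, value) error shape is inferred from CreateChangeLogEntry, so treat that call as an assumption. Topic, broker address, and the order type are placeholders.

package main

import (
	"context"

	"github.com/aws/go-kafka-event-source/streams"
)

type order struct {
	ID  string `json:"id"`
	Qty int    `json:"qty"`
}

func main() {
	// Records come from a pool; Release (or a completed produce) returns them to it.
	rec := streams.NewRecord().
		WithTopic("orders").
		WithRecordType("orderPlaced"). // transmitted via the __grt__ header
		WithKeyString("order-1")

	// assumption: Codec.Encode writes into the record's value buffer and returns an error
	codec := streams.JsonCodec[order]{}
	if err := codec.Encode(rec.ValueWriter(), order{ID: "order-1", Qty: 2}); err != nil {
		panic(err)
	}

	producer := streams.NewProducer(streams.Destination{
		DefaultTopic: "orders",
		Cluster:      streams.SimpleCluster([]string{"127.0.0.1:9092"}),
	})
	defer producer.Close()
	_ = producer.Produce(context.Background(), rec)
}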
422
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package streams import ( "context" "fmt" "sync/atomic" "time" "github.com/twmb/franz-go/pkg/kgo" ) type EventSourceState uint64 const ( Healthy EventSourceState = iota Unhealthy ) const consumerPollFetchTimeout = 5 * time.Second // a convenience function for polling the consumer to save repetitive code func pollConsumer(client *kgo.Client) (context.Context, kgo.Fetches) { ctx, cancel := context.WithTimeout(context.Background(), consumerPollFetchTimeout) f := client.PollFetches(ctx) cancel() return ctx, f } type EventSourceConfig struct { // The group id for the underlying Kafka consumer group. GroupId string // The Kafka topic to consume Topic string // The compacted Kafka topic on which to publish/consume [StateStore] data. If not provided, GKES will generate a name which includes // Topic and GroupId. StateStoreTopic string // The desired number of partitions for Topic. NumPartitions int // The desired replication factor for Topic. Defaults to 1. ReplicationFactor int // The desired min-insync-replicas for Topic. Defaults to 1. MinInSync int // The number of Kafka partitions to use for the applications commit log. Defaults to 5 if unset. CommitLogPartitions int // The Kafka cluster on which Topic resides, or the source of incoming events. SourceCluster Cluster // StateCluster is the Kafka cluster on which the commit log and the StateStore topic resides. If left unset (recommended), defaults to SourceCluster. StateCluster Cluster // The consumer rebalance strategies to use for the underlying Kafka consumer group. BalanceStrategies []BalanceStrategy /* CommitOffsets should be set to true if you are migrating from a traditional consumer group. This will ensure that the offsets are commited to the consumer group when in a mixed fleet scenario (migrating into an EventSource from a standard consumer). If the deploytment fails, the original non-EventSource application can then resume consuming from the commited offsets. Once the EventSource application is well-established, this setting should be switched to false as offsets are managed by another topic. In a EventSource application, committing offsets via the standard mechanism only consumes resources and provides no benefit. */ CommitOffsets bool /* The config used for the eos producer pool. If empty, [DefaultEosConfig] is used. If an EventSource is initialized with an invalid [EosConfig], the application will panic. */ EosConfig EosConfig // If non-nil, the EventSorce will emit [Metric] objects of varying types. This is backed by a channel. If the channel is full // (presumably because the MetricHandler is not able to keep up), // GKES will drop the metric and log at WARN level to prevent processing slow down. MetricsHandler MetricsHandler // Called when a partition has been assigned to the EventSource consumer client. This does not indicate that the partion is being processed. 
	OnPartitionAssigned SourcePartitionEventHandler
	// Called when a previously assigned partition has been activated, meaning the EventSource will start processing events for this partition.
	// At the time this handler is called, the StateStore associated with this partition has been bootstrapped and is ready for use.
	OnPartitionActivated SourcePartitionEventHandler
	// Called when a partition is about to be revoked from the EventSource consumer client.
	// This is a blocking call and, as such, should return quickly.
	OnPartitionWillRevoke SourcePartitionEventHandler
	// Called when a partition has been revoked from the EventSource consumer client.
	// This handler is invoked after GKES has stopped processing and has finished removing any associated resources for the partition.
	OnPartitionRevoked SourcePartitionEventHandler

	DeserializationErrorHandler DeserializationErrorHandler
	TxnErrorHandler             TxnErrorHandler
}

// A readonly wrapper of [EventSourceConfig]. When an [EventSource] is initialized, it reconciles the actual Topic configuration (NumPartitions)
// from the Kafka cluster (or creates it if missing) and wraps the corrected [EventSourceConfig].
type Source struct {
	state   uint64
	config  EventSourceConfig
	failure chan error
}

func newSource(config EventSourceConfig) *Source {
	return &Source{state: uint64(Healthy), config: config, failure: make(chan error)}
}

// A convenience method for creating a [Destination] from your Source. Can be used for creating a [Producer] or [BatchProducer] which publishes to your [EventSource].
func (s *Source) AsDestination() Destination {
	return Destination{
		DefaultTopic:  s.config.Topic,
		NumPartitions: s.config.NumPartitions,
		Cluster:       s.config.SourceCluster,
	}
}

func (s *Source) onPartitionsAssigned(partitions []int32) {
	s.executeHandler(s.config.OnPartitionAssigned, partitions)
}

func (s *Source) onPartitionWillRevoke(partition int32) {
	s.executeHandler(s.config.OnPartitionWillRevoke, []int32{partition})
}

func (s *Source) onPartitionActivated(partition int32) {
	s.executeHandler(s.config.OnPartitionActivated, []int32{partition})
}

func (s *Source) onPartitionsRevoked(partitions []int32) {
	s.executeHandler(s.config.OnPartitionRevoked, partitions)
}

func (s *Source) shouldMarkCommit() bool {
	return s.config.CommitOffsets
}

func (s *Source) eosErrorHandler() TxnErrorHandler {
	if s.config.TxnErrorHandler == nil {
		return DefaultTxnErrorHandler
	}
	return s.config.TxnErrorHandler
}

func (s *Source) deserializationErrorHandler() DeserializationErrorHandler {
	if s.config.DeserializationErrorHandler == nil {
		return DefaultDeserializationErrorHandler
	}
	return s.config.DeserializationErrorHandler
}

func (s *Source) executeHandler(handler SourcePartitionEventHandler, partitions []int32) {
	if handler != nil {
		for _, p := range partitions {
			handler(s, p)
		}
	}
}

func (s *Source) State() EventSourceState {
	return EventSourceState(atomic.LoadUint64(&s.state))
}

func (s *Source) fail(err error) {
	atomic.StoreUint64(&s.state, uint64(Unhealthy))
	s.failure <- err
}

func (s *Source) Topic() string {
	return s.config.Topic
}

func (s *Source) GroupId() string {
	return s.config.GroupId
}

func (s *Source) Config() EventSourceConfig {
	return s.config
}

func (s *Source) BalanceStrategies() []BalanceStrategy {
	return s.config.BalanceStrategies
}

func (s *Source) NumPartitions() int {
	return s.config.NumPartitions
}

// Returns the formatted topic name used for the commit log of Source.
func (s *Source) CommitLogTopicNameForGroupId() string {
	return fmt.Sprintf("gkes_commit_log_%s", s.config.GroupId)
}

// Returns the formatted topic name used for the [StateStore] of Source.
func (s *Source) StateStoreTopicName() string {
	if len(s.config.StateStoreTopic) > 0 {
		return s.config.StateStoreTopic
	}
	return fmt.Sprintf("gkes_change_log_%s_%s", s.config.Topic, s.config.GroupId)
}

// Returns Source.StateCluster if defined, otherwise Source.Cluster.
func (s *Source) stateCluster() Cluster {
	if s.config.StateCluster == nil {
		return s.config.SourceCluster
	}
	return s.config.StateCluster
}

func minInSyncConfig(source *Source) int {
	factor := replicationFactorConfig(source)
	if factor <= 1 {
		return 1
	}
	if source.config.MinInSync >= factor {
		return factor - 1
	}
	return source.config.MinInSync
}

func replicationFactorConfig(source *Source) int {
	if source.config.ReplicationFactor <= 0 {
		return 1
	}
	return source.config.ReplicationFactor
}

func commitLogPartitionsConfig(source *Source) int {
	if source.config.CommitLogPartitions <= 0 {
		return 5
	}
	return source.config.CommitLogPartitions
}
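// Illustrative sketch (not part of the original file): how the partition lifecycle handlers above
// might be wired into an EventSourceConfig before calling CreateSource. The handler signature is
// inferred from executeHandler above; topic, group, and broker values are hypothetical placeholders.
func ExamplePartitionEventHandlers() {
	cfg := EventSourceConfig{
		Topic:         "example-topic", // hypothetical
		GroupId:       "example-group", // hypothetical
		NumPartitions: 10,              // hypothetical
		SourceCluster: SimpleCluster([]string{"127.0.0.1:9092"}),
		// Invoked once the partition's StateStore has been bootstrapped and processing is about to begin.
		OnPartitionActivated: func(s *Source, partition int32) {
			log.Infof("partition %d activated for topic %s", partition, s.Topic())
		},
		// Invoked after GKES has stopped processing and released resources for the partition.
		OnPartitionRevoked: func(s *Source, partition int32) {
			log.Infof("partition %d revoked for group %s", partition, s.GroupId())
		},
	}
	if _, err := CreateSource(cfg); err != nil {
		log.Errorf("could not create source: %v", err)
	}
}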
239
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package streams

import (
	"context"
	"sync"
	"time"

	"github.com/aws/go-kafka-event-source/streams/sak"
	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
)

// A thick wrapper around a kgo.Client. Handles interaction with the IncrementalRebalancer,
// as well as providing mechanisms for interjecting into a stream.
type eventSourceConsumer[T StateStore] struct {
	client             *kgo.Client
	partitionedStore   *partitionedChangeLog[T]
	stateStoreConsumer *stateStoreConsumer[T]
	ctx                context.Context
	workers            map[int32]*partitionWorker[T]
	prepping           map[int32]*stateStorePartition[T]
	workerMux          sync.Mutex
	preppingMux        sync.Mutex
	incrBalancer       IncrementalGroupRebalancer
	eventSource        *EventSource[T]
	source             *Source
	commitLog          *eosCommitLog
	producerPool       *eosProducerPool[T]
	metrics            chan Metric
	// prepping map[int32]*partitionPrepper[T]
}

// Creates a new eventSourceConsumer.
// `eventSource` must be a fully initialized EventSource.
func newEventSourceConsumer[T StateStore](eventSource *EventSource[T], additionalClientOptions ...kgo.Opt) (*eventSourceConsumer[T], error) {
	cl := newEosCommitLog(eventSource.ForkRunStatus(), eventSource.source, int(commitLogPartitionsConfig(eventSource.source)))
	var partitionedStore *partitionedChangeLog[T]
	source := eventSource.source
	partitionedStore = newPartitionedChangeLog(eventSource.createChangeLogReceiver, source.StateStoreTopicName())
	sc := &eventSourceConsumer[T]{
		partitionedStore: partitionedStore,
		ctx:              eventSource.runStatus.Ctx(),
		workers:          make(map[int32]*partitionWorker[T]),
		prepping:         make(map[int32]*stateStorePartition[T]),
		eventSource:      eventSource,
		source:           source,
		commitLog:        cl,
		metrics:          eventSource.metrics,
	}
	balanceStrategies := source.config.BalanceStrategies
	if len(balanceStrategies) == 0 {
		balanceStrategies = DefaultBalanceStrategies
		source.config.BalanceStrategies = balanceStrategies
	}
	groupBalancers := toGroupBalancers(sc, balanceStrategies)
	balancerOpt := kgo.Balancers(groupBalancers...)
	opts := []kgo.Opt{
		balancerOpt,
		kgo.ConsumerGroup(source.config.GroupId),
		kgo.ConsumeTopics(source.config.Topic),
		kgo.OnPartitionsAssigned(sc.partitionsAssigned),
		kgo.OnPartitionsRevoked(sc.partitionsRevoked),
		kgo.SessionTimeout(6 * time.Second),
		kgo.FetchMaxWait(time.Second),
		kgo.AdjustFetchOffsetsFn(sc.adjustOffsetsBeforeAssign),
	}

	if len(additionalClientOptions) > 0 {
		opts = append(opts, additionalClientOptions...)
	}
	if source.shouldMarkCommit() {
		// we're in a migrating consumer group (non-GKES to GKES or vice-versa)
		opts = append(opts, kgo.AutoCommitMarks(), kgo.AutoCommitInterval(time.Second*5))
	} else {
		opts = append(opts, kgo.DisableAutoCommit())
	}
	client, err := NewClient(
		source.config.SourceCluster,
		opts...)
	if err != nil {
		return nil, err
	}
	eosConfig := source.config.EosConfig
	if eosConfig.IsZero() {
		eosConfig = DefaultEosConfig
		source.config.EosConfig = eosConfig
	}
	eosConfig.validate()
	sc.producerPool = newEOSProducerPool[T](source, cl, eosConfig, client, eventSource.metrics)

	for _, gb := range groupBalancers {
		if igr, ok := gb.(IncrementalGroupRebalancer); ok {
			sc.incrBalancer = igr
			break
		}
	}
	sc.client = client
	sc.stateStoreConsumer = newStateStoreConsumer[T](eventSource.ForkRunStatus(), source)
	return sc, nil
}

// Since we're using our own commit log, adjust the starting offset for a newly assigned partition to reflect what is in the commitLog.
func (sc *eventSourceConsumer[T]) adjustOffsetsBeforeAssign(ctx context.Context, assignments map[string]map[int32]kgo.Offset) (map[string]map[int32]kgo.Offset, error) {
	for topic, partitionAssignments := range assignments {
		partitions := sak.MapKeysToSlice(partitionAssignments)
		for _, p := range partitions {
			tp := ntp(p, topic)
			offset := sc.commitLog.Watermark(tp)
			log.Infof("starting consumption for %+v at offset: %d", tp, offset+1)
			if offset > 0 {
				partitionAssignments[p] = kgo.NewOffset().At(offset)
			}
		}
	}
	return assignments, nil
}

func (sc *eventSourceConsumer[T]) Client() *kgo.Client {
	return sc.client
}

// Needed to fulfill the IncrRebalanceInstructionHandler interface defined by IncrementalGroupRebalancer.
// Should NOT be invoked directly.
func (sc *eventSourceConsumer[T]) PrepareTopicPartition(tp TopicPartition) {
	sc.preppingMux.Lock()
	defer sc.preppingMux.Unlock()
	partition := tp.Partition
	if _, ok := sc.prepping[partition]; !ok {
		store := sc.partitionedStore.assign(partition)
		ssp := sc.stateStoreConsumer.preparePartition(partition, store)
		sc.prepping[partition] = ssp
		go func() {
			start := time.Now()
			log.Debugf("prepping %+v", tp)
			ssp.sync()
			processed := ssp.processed()
			duration := time.Since(start)
			log.Debugf("Prepped %+v, %d messages in %v (tps: %d)", tp, processed, duration, int(float64(processed)/duration.Seconds()))
			sc.incrBalancer.PartitionPrepared(tp)
			if sc.metrics != nil {
				sc.metrics <- Metric{
					Operation:      PartitionPreppedOperation,
					StartTime:      start,
					EndTime:        time.Now(),
					PartitionCount: 1,
					Partition:      partition,
					Count:          int(processed),
					Bytes:          int(ssp.processedBytes()),
					GroupId:        sc.source.GroupId(),
				}
			}
		}()
	}
}

// Needed to fulfill the IncrRebalanceInstructionHandler interface defined by IncrementalGroupRebalancer.
// Should NOT be invoked directly.
func (sc *eventSourceConsumer[T]) ForgetPreparedTopicPartition(tp TopicPartition) {
	sc.preppingMux.Lock()
	defer sc.preppingMux.Unlock()
	if _, ok := sc.prepping[tp.Partition]; ok {
		sc.stateStoreConsumer.cancelPartition(tp.Partition)
		delete(sc.prepping, tp.Partition)
	} else {
		// what to do? probably nothing, but if we have a double assignment, we could have problems
		// need to investigate this race condition further
		log.Warnf("ForgetPreparedTopicPartition failed for %+v", tp)
	}
}

func (sc *eventSourceConsumer[T]) assignPartitions(topic string, partitions []int32) {
	sc.workerMux.Lock()
	defer sc.workerMux.Unlock()
	sc.preppingMux.Lock()
	defer sc.preppingMux.Unlock()
	for _, p := range partitions {
		tp := TopicPartition{Partition: p, Topic: topic}
		store := sc.partitionedStore.assign(p)
		// we want to pause this until the partitionWorker is ready
		// otherwise we could fill our buffer and block other partitions while we sync the state store
		// sc.client.PauseFetchPartitions(map[string][]int32{topic: {p}})
		if prepper, ok := sc.prepping[p]; ok {
			log.Infof("syncing prepped partition %+v", prepper.topicPartition)
			delete(sc.prepping, p)
			sc.workers[p] = newPartitionWorker(sc.eventSource, tp, sc.commitLog, store, sc.producerPool, func() {
				prepper.sync()
				// sc.client.ResumeFetchPartitions(map[string][]int32{topic: {p}})
			})
		} else if _, ok := sc.workers[p]; !ok {
			prepper = sc.stateStoreConsumer.activatePartition(p, store)
			log.Infof("syncing unprepped partition %+v", prepper.topicPartition)
			sc.workers[p] = newPartitionWorker(sc.eventSource, tp, sc.commitLog, store, sc.producerPool, func() {
				prepper.sync()
				// sc.client.ResumeFetchPartitions(map[string][]int32{topic: {p}})
			})
		}
	}
	sc.incrBalancer.PartitionsAssigned(toTopicPartitions(topic, partitions...)...)
	// notify observers
	sc.source.onPartitionsAssigned(partitions)
}

func (sc *eventSourceConsumer[T]) revokePartitions(topic string, partitions []int32) {
	sc.workerMux.Lock()
	defer sc.workerMux.Unlock()
	if sc.partitionedStore == nil {
		return
	}
	for _, p := range partitions {
		sc.source.onPartitionWillRevoke(p)
		if worker, ok := sc.workers[p]; ok {
			worker.revoke()
			delete(sc.workers, p)
		}
		sc.partitionedStore.revoke(p)
	}
	// notify observers
	sc.source.onPartitionsRevoked(partitions)
}

func (sc *eventSourceConsumer[T]) partitionsAssigned(ctx context.Context, _ *kgo.Client, assignments map[string][]int32) {
	for topic, partitions := range assignments {
		log.Debugf("assigned topic: %s, partitions: %v", topic, partitions)
		sc.assignPartitions(topic, partitions)
	}
}

func (sc *eventSourceConsumer[T]) partitionsRevoked(ctx context.Context, _ *kgo.Client, assignments map[string][]int32) {
	for topic, partitions := range assignments {
		log.Debugf("revoked topic: %s, partitions: %v", topic, partitions)
		sc.revokePartitions(topic, partitions)
	}
}

func (sc *eventSourceConsumer[T]) receive(p kgo.FetchTopicPartition) {
	sc.workerMux.Lock()
	worker, ok := sc.workers[p.Partition]
	sc.workerMux.Unlock()
	if !ok || len(p.Records) == 0 {
		return
	}
	worker.add(p.Records)
}

// Starts the underlying kafka client and syncs the local commit log for the consumer group.
// Once synced, polls for records and forwards them to partitionWorkers.
func (sc *eventSourceConsumer[T]) start() {
	go sc.commitLog.Start()
	sc.commitLog.syncAll()
	for {
		ctx, f := pollConsumer(sc.client)
		if f.IsClientClosed() {
			log.Infof("client closed for group: %v", sc.source.GroupId())
			return
		}
		for _, err := range f.Errors() {
			if err.Err != ctx.Err() {
				log.Errorf("%v", err)
			}
		}
		f.EachPartition(sc.receive)
	}
}

// Inserts the interjection into the appropriate partitionWorker's interjectionChannel.
// Returns immediately if the partition is not currently assigned.
func (sc *eventSourceConsumer[T]) interject(partition int32, cmd Interjector[T]) <-chan error {
	sc.workerMux.Lock()
	defer sc.workerMux.Unlock()
	c := make(chan error, 1)
	w := sc.workers[partition]
	if w == nil {
		c <- ErrPartitionNotAssigned
		return c
	}
	if !w.canInterject() {
		c <- ErrPartitionNotAssigned
		return c
	}
	w.interjectionInput <- &interjection[T]{
		isOneOff:       true,
		topicPartition: w.topicPartition,
		interjector: func(ec *EventContext[T], t time.Time) ExecutionState {
			state := cmd(ec, t)
			close(c)
			return state
		},
	}
	return c
}

// A convenience function which allows you to Interject into every active partition assigned to the consumer
// without creating an individual timer per partition.
// The interjector is invoked for each active partition, blocking on each iteration until the Interjection can be processed.
// Useful for gathering store statistics, but can be used in place of a standard Interjection.
func (sc *eventSourceConsumer[T]) forEachChangeLogPartitionSync(interjector Interjector[T]) {
	sc.workerMux.Lock()
	ps := sak.MapKeysToSlice(sc.workers)
	sc.workerMux.Unlock()
	for _, p := range ps {
		if err := <-sc.interject(p, interjector); err != nil {
			log.Errorf("Could not interject into %d, error: %v", p, err)
		}
	}
}

type interjectionTracker struct {
	partition int32
	c         <-chan error
}

func (sc *eventSourceConsumer[T]) forEachChangeLogPartitionAsync(interjector Interjector[T]) {
	sc.workerMux.Lock()
	ps := sak.MapKeysToSlice(sc.workers)
	sc.workerMux.Unlock()
	its := make([]interjectionTracker, 0, len(ps))
	for _, p := range ps {
		its = append(its, interjectionTracker{p, sc.interject(p, interjector)})
	}
	for _, it := range its {
		if err := <-it.c; err != nil {
			log.Errorf("Could not interject into %d, error: %v", it.partition, err)
		}
	}
}

// TODO: This needs some more work after we provide balancer configuration.
// If the group only has 1 allowed protocol, there is no need for this check.
// If there are multiple, we need to interrogate Kafka to see which is active.
func (sc *eventSourceConsumer[T]) currentProtocolIsIncremental() bool {
	if sc.incrBalancer == nil {
		return false
	}
	if len(sc.source.BalanceStrategies()) <= 1 {
		return true
	}
	// we're going to attempt to retrieve the current protocol
	// this is somewhat unreliable as, if the group state is in PreparingRebalance mode,
	// the protocol returned will be empty
	adminClient := kadm.NewClient(sc.Client())
	groups, err := adminClient.DescribeGroups(context.Background(), sc.source.GroupId())
	if err != nil || len(groups) == 0 {
		log.Errorf("could not confirm group protocol: %v", err)
		return false
	}
	log.Debugf("consumerGroup protocol response: %+v", groups)
	group := groups[sc.source.GroupId()]
	if len(group.Protocol) == 0 {
		log.Warnf("could not retrieve group rebalance protocol, group state: %v", group.State)
	}
	return group.Protocol == IncrementalCoopProtocol
}

// Signals the IncrementalRebalancer to start the process of shutting down this consumer in an orderly fashion.
func (sc *eventSourceConsumer[T]) leave() <-chan struct{} {
	log.Infof("leave signaled for group: %v", sc.source.GroupId())
	c := make(chan struct{}, 1)
	if sc.incrBalancer == nil || !sc.currentProtocolIsIncremental() {
		sc.stop()
		c <- struct{}{}
		return c
	}
	go func() {
		<-sc.incrBalancer.GracefullyLeaveGroup()
		sc.stop()
		c <- struct{}{}
	}()
	return c
}

// Immediately stops the consumer, leaving the consumer group abruptly.
func (sc *eventSourceConsumer[T]) stop() {
	sc.client.Close()
	log.Infof("left group: %v", sc.source.GroupId())
}
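// Illustrative sketch (not part of the original file): an Interjector callback with the signature
// implied by interject() above. It only logs; the ExecutionState value returned ("Complete") is an
// assumption and should be treated as a placeholder for whichever state your handler should report.
func loggingInterjector[T StateStore](ec *EventContext[T], t time.Time) ExecutionState {
	// Runs inside the partitionWorker for the interjected partition, serialized with normal event processing.
	log.Infof("interjection executed at %v", t)
	return Complete // assumed ExecutionState constant
}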
400
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package streams

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"github.com/aws/go-kafka-event-source/streams/sak"
	"github.com/google/uuid"
	"github.com/twmb/franz-go/pkg/kgo"
)

type partitionState uint32

const (
	paused partitionState = iota
	prepping
	standby
	ready
	active
)

const markerKeyString = "gkes__mark"

var markerKey = []byte(markerKeyString)

var startEpochOffset = kgo.EpochOffset{
	Offset: 0,
	Epoch:  -1,
}

type StateStore interface {
	ReceiveChange(IncomingRecord) error
	Revoked()
}

type stateStorePartition[T StateStore] struct {
	buffer         chan []*kgo.Record
	client         *kgo.Client
	waiters        map[string]chan struct{}
	topicPartition TopicPartition
	state          partitionState
	count          uint64
	byteCount      uint64
	waiterLock     sync.Mutex
	highWatermark  int64
}

func (ssp *stateStorePartition[T]) add(ftp kgo.FetchTopicPartition) {
	ssp.buffer <- ftp.FetchPartition.Records
}

func (ssp *stateStorePartition[T]) pause() {
	ssp.setState(paused)
	topic := ssp.topicPartition.Topic
	partition := ssp.topicPartition.Partition
	ssp.client.PauseFetchPartitions(map[string][]int32{
		topic: {partition},
	})
}

func (ssp *stateStorePartition[T]) kill() {
	if ssp.buffer != nil {
		close(ssp.buffer)
		ssp.buffer = nil
	}
}

func (ssp *stateStorePartition[T]) cancel() {
	ssp.pause()
	ssp.kill()
}

func (ssp *stateStorePartition[T]) addWaiter() (c chan struct{}, mark []byte) {
	ssp.waiterLock.Lock()
	defer ssp.waiterLock.Unlock()
	c = make(chan struct{})
	s := uuid.NewString()
	mark = []byte(s)
	ssp.waiters[s] = c
	return
}

func (ssp *stateStorePartition[T]) removeWaiterForMark(mark []byte) (chan struct{}, bool) {
	ssp.waiterLock.Lock()
	defer ssp.waiterLock.Unlock()
	if c, ok := ssp.waiters[string(mark)]; ok {
		delete(ssp.waiters, string(mark))
		return c, true
	}
	return nil, false
}

func (ssp *stateStorePartition[T]) sync() {
	c, mark := ssp.addWaiter()
	sendMarkerMessage(ssp.client, ssp.topicPartition, mark)
	<-c
}

func (ssp *stateStorePartition[T]) processed() uint64 {
	return atomic.LoadUint64(&ssp.count)
}

func (ssp *stateStorePartition[T]) processedBytes() uint64 {
	return atomic.LoadUint64(&ssp.byteCount)
}

func (ssp *stateStorePartition[T]) partitionState() partitionState {
	return partitionState(atomic.LoadUint32((*uint32)(&ssp.state)))
}

func (ssp *stateStorePartition[T]) setState(state partitionState) {
	atomic.StoreUint32((*uint32)(&ssp.state), uint32(state))
}

func (ssp *stateStorePartition[T]) prep(initialState partitionState, store changeLogPartition[T]) {
	ssp.setState(initialState)
	ssp.count = 0
	ssp.byteCount = 0
	ssp.buffer = make(chan []*kgo.Record, 1024)
	topic := ssp.topicPartition.Topic
	partition := ssp.topicPartition.Partition
	ssp.client.SetOffsets(map[string]map[int32]kgo.EpochOffset{
		topic: {partition: startEpochOffset},
	})
	ssp.client.ResumeFetchPartitions(map[string][]int32{
		topic: {partition},
	})
	go ssp.populate(store)
}

func (ssp *stateStorePartition[T]) isCompletionMarker(val []byte) (complete bool) {
	var waiter chan struct{}
	var ok bool
	if waiter, ok = ssp.removeWaiterForMark(val); !ok {
		return
	}
	switch ssp.partitionState() {
	case prepping:
		log.Debugf("transitioning from prepping to ready for %+v", ssp.topicPartition)
		ssp.setState(ready)
	case ready:
		log.Debugf("transitioning from ready to paused for %+v", ssp.topicPartition)
		complete = true
		ssp.cancel()
	}
	close(waiter)
	return
}

func (ssp *stateStorePartition[T]) populate(store changeLogPartition[T]) {
	log.Debugf("starting populator for %+v", ssp.topicPartition)
	for records := range ssp.buffer {
		if !ssp.handleRecordsAndContinue(records, store) {
			log.Debugf("closed populator for %+v", ssp.topicPartition)
			return
		}
	}
}

func (ssp *stateStorePartition[T]) handleRecordsAndContinue(records []*kgo.Record, store changeLogPartition[T]) bool {
	for _, record := range records {
		if isMarkerRecord(record) {
			if ssp.isCompletionMarker(record.Value) {
				return false
			}
		} else {
			store.receiveChangeInternal(record)
			// if err := ; err != nil {
			// 	log.Errorf("Error receiving change on topic: %s, partition: %d, offset: %d, err: %v",
			// 		record.Topic, record.Partition, record.Offset, err)
			// }
			atomic.AddUint64(&ssp.count, 1)
			atomic.AddUint64(&ssp.byteCount, uint64(recordSize(*record)))
		}
	}
	return true
}

type stateStoreConsumer[T StateStore] struct {
	runStatus  sak.RunStatus
	partitions map[int32]*stateStorePartition[T]
	source     *Source
	client     *kgo.Client
	mux        sync.Mutex
	topic      string
}

func newStateStoreConsumer[T StateStore](runStatus sak.RunStatus, source *Source) *stateStoreConsumer[T] {
	partitionCount := int32(source.config.NumPartitions)
	stateStorePartitions := make(map[int32]*stateStorePartition[T], partitionCount)
	assignments := make(map[int32]kgo.Offset, partitionCount)
	partitions := make([]int32, partitionCount)
	topic := source.StateStoreTopicName()

	for i := int32(0); i < partitionCount; i++ {
		partitions[i] = i
		assignments[i] = kgo.NewOffset().AtStart()
	}
	client, err := NewClient(source.stateCluster(),
		kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{
			topic: assignments,
		}),
		kgo.FetchMaxWait(time.Second),
		kgo.RecordPartitioner(kgo.ManualPartitioner()),
	)
	if err != nil {
		panic(err)
	}
	client.PauseFetchPartitions(map[string][]int32{
		topic: partitions,
	})
	for i := int32(0); i < partitionCount; i++ {
		stateStorePartitions[i] = &stateStorePartition[T]{
			topicPartition: TopicPartition{Partition: i, Topic: topic},
			waiters:        make(map[string]chan struct{}, 2),
			client:         client,
			state:          paused,
			buffer:         make(chan []*kgo.Record, 1024),
			highWatermark:  -1,
		}
	}
	ssc := &stateStoreConsumer[T]{
		runStatus:  runStatus,
		source:     source,
		partitions: stateStorePartitions,
		client:     client,
		topic:      topic,
	}
	go ssc.consume()
	return ssc
}

func (ssc *stateStoreConsumer[T]) consume() {
	for ssc.runStatus.Running() {
		ctx, f := pollConsumer(ssc.client)
		if f.IsClientClosed() {
			log.Debugf("stateStoreConsumer client closed")
			ssc.stop()
			return
		}
		for _, fetchErr := range f.Errors() {
			if fetchErr.Err != ctx.Err() {
				log.Errorf("%v", fetchErr)
			}
		}
		f.EachPartition(func(partitionFetch kgo.FetchTopicPartition) {
			ssp := ssc.partitions[partitionFetch.Partition]
			if ssp.partitionState() == paused {
				ssp.pause()
			} else {
				ssp.add(partitionFetch)
			}
		})
	}
	ssc.stop()
	log.Debugf("stateStoreConsumer halted")
	ssc.client.Close()
}

func (ssc *stateStoreConsumer[T]) cancelPartition(p int32) {
	ssc.mux.Lock()
	defer ssc.mux.Unlock()
	ssp := ssc.partitions[p]
	ssp.cancel()
}

func (ssc *stateStoreConsumer[T]) preparePartition(p int32, store changeLogPartition[T]) *stateStorePartition[T] {
	ssc.mux.Lock()
	defer ssc.mux.Unlock()
	ssp := ssc.partitions[p]
	ssp.prep(prepping, store)
	return ssp
}

func (ssc *stateStoreConsumer[T]) activatePartition(p int32, store changeLogPartition[T]) *stateStorePartition[T] {
	ssc.mux.Lock()
	defer ssc.mux.Unlock()
	ssp := ssc.partitions[p]
	ssp.prep(ready, store)
	return ssp
}

func (ssc *stateStoreConsumer[T]) stop() {
	ssc.mux.Lock()
	defer ssc.mux.Unlock()
	for _, ssp := range ssc.partitions {
		ssp.kill()
	}
}

func sendMarkerMessage(producer *kgo.Client, tp TopicPartition, mark []byte) error {
	record := kgo.KeySliceRecord(markerKey, mark)
	record.Topic = tp.Topic
	record.Partition = tp.Partition
	record.Headers = append(record.Headers, kgo.RecordHeader{Key: markerKeyString})
	log.Debugf("sending marker message to: %+v", tp)
	c := make(chan struct{})
	var err error
	producer.Produce(context.Background(), record, func(r *kgo.Record, e error) {
		err = e
		close(c)
	})
	<-c
	return err
}
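// Illustrative sketch (not part of the original file): a minimal in-memory implementation of the
// StateStore interface declared above. The keying scheme (record key -> record value) is an
// assumption for demonstration, as is the use of IncomingRecord's Key()/Value() accessors.
type mapStateStore struct {
	items map[string][]byte
}

func newMapStateStore() *mapStateStore {
	return &mapStateStore{items: make(map[string][]byte)}
}

// ReceiveChange is invoked for every change log record, both while the partition is being
// bootstrapped (see populate/handleRecordsAndContinue above) and as new changes arrive.
func (s *mapStateStore) ReceiveChange(r IncomingRecord) error {
	s.items[string(r.Key())] = r.Value() // assumes IncomingRecord exposes Key()/Value()
	return nil
}

// Revoked is invoked once the partition is no longer assigned to this consumer; release resources here.
func (s *mapStateStore) Revoked() {
	s.items = nil
}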
323
go-kafka-event-source
aws
Go
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package streams

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/aws/go-kafka-event-source/streams/sak"
	"github.com/google/btree"
	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
)

type TopicPartition struct {
	Partition int32
	Topic     string
}

// var missingTopicError = errors.New("topic does not exist")

// ntp == 'New Topic Partition'. Essentially a macro for TopicPartition{Partition: p, Topic: t}, which is quite verbose.
func ntp(p int32, t string) TopicPartition {
	return TopicPartition{Partition: p, Topic: t}
}

var tpSetFreeList = btree.NewFreeListG[TopicPartition](128)

// A convenience data structure. It is what the name implies, a Set of TopicPartitions.
// This data structure is not thread-safe. You will need to provide your own locking mechanism.
type TopicPartitionSet struct {
	*btree.BTreeG[TopicPartition]
}

// Comparator for TopicPartitions.
func topicPartitionLess(a, b TopicPartition) bool {
	res := a.Partition - b.Partition
	if res != 0 {
		return res < 0
	}
	return a.Topic < b.Topic
}

// Returns a new, empty TopicPartitionSet.
func NewTopicPartitionSet() TopicPartitionSet {
	return TopicPartitionSet{btree.NewWithFreeListG(16, topicPartitionLess, tpSetFreeList)}
}

// Insert the TopicPartition. Returns true if the item was inserted, false if the item was already present.
func (tps TopicPartitionSet) Insert(tp TopicPartition) bool {
	_, ok := tps.ReplaceOrInsert(tp)
	return !ok
}

// Returns true if tp is currently a member of TopicPartitionSet.
func (tps TopicPartitionSet) Contains(tp TopicPartition) bool {
	_, ok := tps.Get(tp)
	return ok
}

// Removes tp from the TopicPartitionSet. Returns true if the item was present.
func (tps TopicPartitionSet) Remove(tp TopicPartition) bool {
	_, ok := tps.Delete(tp)
	return ok
}

// Converts the set to a newly allocated slice of TopicPartitions.
func (tps TopicPartitionSet) Items() []TopicPartition {
	slice := make([]TopicPartition, 0, tps.Len())
	tps.Ascend(func(tp TopicPartition) bool {
		slice = append(slice, tp)
		return true
	})
	return slice
}

// An interface for implementing a reusable Kafka client configuration.
// TODO: document reserved options
type Cluster interface {
	// Returns the list of kgo.Opt(s) that will be used whenever a connection is made to this cluster.
	// At minimum, it should return the kgo.SeedBrokers() option.
	Config() ([]kgo.Opt, error)
}

// A [Cluster] implementation useful for local development/testing. Establishes a plain text connection to a Kafka cluster.
// For a more advanced example, see [github.com/aws/go-kafka-event-source/msk].
//
//	cluster := streams.SimpleCluster([]string{"127.0.0.1:9092"})
type SimpleCluster []string

// Returns []kgo.Opt{kgo.SeedBrokers(sc...)}
func (sc SimpleCluster) Config() ([]kgo.Opt, error) {
	return []kgo.Opt{kgo.SeedBrokers(sc...)}, nil
}

// NewClient creates a kgo.Client from the options returned by the provided [Cluster] and additional `options`.
// Used internally and exposed for convenience.
func NewClient(cluster Cluster, options ...kgo.Opt) (*kgo.Client, error) {
	configOptions := []kgo.Opt{
		kgo.WithLogger(kgoLogger),
		kgo.ProducerBatchCompression(kgo.NoCompression()),
		kgo.FetchIsolationLevel(kgo.ReadCommitted()),
		kgo.ProducerLinger(5 * time.Millisecond),
		kgo.RecordPartitioner(NewOptionalPartitioner(kgo.StickyKeyPartitioner(nil))),
	}
	clusterOpts, err := cluster.Config()
	if err != nil {
		return nil, err
	}
	configOptions = append(configOptions, clusterOpts...)
	configOptions = append(configOptions, options...)
	return kgo.NewClient(configOptions...)
}

func createTopicFromConfigMap(adminClient *kadm.Client, numPartitions int32, replicationFactor int16, config map[string]*string, topic ...string) error {
	res, err := adminClient.CreateTopics(context.Background(), numPartitions, replicationFactor, config, topic...)
	log.Infof("createTopic res: %+v, err: %v", res, err)
	return err
}

func createTopic(adminClient *kadm.Client, numPartitions int, replicationFactor int, minInsync int, cleanupPolicy CleanupPolicy, dirtyRatio float64, topic ...string) error {
	configMap := map[string]*string{
		"min.insync.replicas": sak.Ptr(strconv.Itoa(minInsync)),
	}
	if cleanupPolicy == CompactCleanupPolicy {
		configMap["cleanup.policy"] = sak.Ptr("compact")
		configMap["min.cleanable.dirty.ratio"] = sak.Ptr(strconv.FormatFloat(dirtyRatio, 'f', 2, 64))
	}
	return createTopicFromConfigMap(adminClient, int32(numPartitions), int16(replicationFactor), configMap, topic...)
}

func createDestination(destination Destination) (Destination, error) {
	client, err := NewClient(destination.Cluster)
	if err != nil {
		return destination, err
	}
	adminClient := kadm.NewClient(client)
	minInSync := fmt.Sprintf("%d", destination.MinInSync)
	createTopicFromConfigMap(adminClient, int32(destination.NumPartitions), int16(destination.ReplicationFactor), map[string]*string{
		"min.insync.replicas": sak.Ptr(minInSync),
	})
	return destination, err
}

func CreateDestination(destination Destination) (resolved Destination, err error) {
	for retryCount := 0; retryCount < 15; retryCount++ {
		resolved, err = createDestination(destination)
		if isNetworkError(err) {
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	return
}

func createSource(source *Source) (*Source, error) {
	sourceTopicClient, err := NewClient(source.config.SourceCluster, kgo.RequestRetries(20), kgo.RetryTimeout(30*time.Second))
	if err != nil {
		return source, err
	}
	eosClient, err := NewClient(source.stateCluster(), kgo.RequestRetries(20))
	if err != nil {
		return source, err
	}
	sourceTopicAdminClient := kadm.NewClient(sourceTopicClient)
	eosAdminClient := kadm.NewClient(eosClient)
	source, err = resolveOrCreateTopics(source, sourceTopicAdminClient, eosAdminClient)
	sourceTopicClient.Close()
	eosClient.Close()
	return source, err
}

func isNetworkError(err error) bool {
	if err == nil {
		return false
	}
	var opError *net.OpError
	if errors.As(err, &opError) {
		log.Warnf("network error for operation: %s, error: %v", opError.Op, opError)
		return true
	}
	log.Errorf("non network error: %v", err)
	return false
}

// Creates all necessary topics in the appropriate Kafka clusters as defined by Source.
// Automatically invoked as part of NewSourceConsumer(). Ignores TOPIC_ALREADY_EXISTS errors.
// Returns a corrected Source where NumPartitions and CommitLogPartitions are pulled from a ListTopics call. This is to prevent drift errors.
// Returns an error if the details for Source topics could not be retrieved, or if there is a mismatch in partition counts for the source topic and change log topic.
func CreateSource(sourceConfig EventSourceConfig) (resolved *Source, err error) {
	if len(sourceConfig.GroupId) == 0 {
		return nil, fmt.Errorf("GroupId not provided")
	}
	if len(sourceConfig.Topic) == 0 {
		return nil, fmt.Errorf("Topic not provided")
	}
	if sourceConfig.SourceCluster == nil {
		return nil, fmt.Errorf("SourceCluster not provided")
	}
	source := newSource(sourceConfig)
	for retryCount := 0; retryCount < 15; retryCount++ {
		resolved, err = createSource(source)
		if isNetworkError(err) {
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	return
}

func resolveOrCreateTopics(source *Source, sourceTopicAdminClient, eosAdminClient *kadm.Client) (*Source, error) {
	topic := source.Topic()
	commitLogName := source.CommitLogTopicNameForGroupId()
	changeLogName := source.StateStoreTopicName()
	res, err := sourceTopicAdminClient.ListTopicsWithInternal(context.Background(), topic)
	if err != nil {
		return nil, err
	}
	if val, ok := res[topic]; ok && val.Err == nil {
		source.config.NumPartitions = len(val.Partitions.Numbers())
		source.config.ReplicationFactor = val.Partitions.NumReplicas()
	} else {
		err = createTopic(sourceTopicAdminClient, source.NumPartitions(), replicationFactorConfig(source), minInSyncConfig(source),
			DeleteCleanupPolicy, 1, topic)
		if err != nil {
			return nil, err
		}
	}

	topics := []string{commitLogName}
	if len(changeLogName) > 0 {
		topics = append(topics, changeLogName)
	}
	res, err = eosAdminClient.ListTopicsWithInternal(context.Background(), topics...)
	if err != nil {
		return nil, err
	}
	if val, ok := res[commitLogName]; ok && val.Err == nil {
		source.config.CommitLogPartitions = len(val.Partitions.Numbers())
	} else {
		err = createTopic(eosAdminClient, commitLogPartitionsConfig(source), replicationFactorConfig(source), minInSyncConfig(source),
			CompactCleanupPolicy, 0.9, commitLogName)
		if err != nil {
			return nil, err
		}
	}
	if len(changeLogName) > 0 {
		if val, ok := res[changeLogName]; ok && val.Err == nil {
			changeLogPartitionCount := len(val.Partitions.Numbers())
			if changeLogPartitionCount != source.config.NumPartitions {
				return nil, fmt.Errorf("change log partition count (%d) does not match source topic partition count (%d)",
					changeLogPartitionCount, source.config.NumPartitions)
			}
		} else {
			err = createTopic(eosAdminClient, source.NumPartitions(), replicationFactorConfig(source), minInSyncConfig(source),
				CompactCleanupPolicy, 0.5, changeLogName)
			if err != nil {
				return nil, err
			}
		}
	}
	return source, nil
}

// Deletes all topics associated with a Source. Provided for local testing purposes only.
// Do not call this in deployed applications unless your topics are transient in nature.
func DeleteSource(sourceConfig EventSourceConfig) error {
	source := newSource(sourceConfig)
	sourceTopicClient, err := NewClient(source.config.SourceCluster)
	if err != nil {
		return err
	}
	eosClient, err := NewClient(source.stateCluster())
	if err != nil {
		return err
	}
	sourceTopicAdminClient := kadm.NewClient(sourceTopicClient)
	eosAdminClient := kadm.NewClient(eosClient)
	sourceTopicAdminClient.DeleteTopics(context.Background(), source.config.Topic)
	eosAdminClient.DeleteTopics(context.Background(), source.CommitLogTopicNameForGroupId(), source.StateStoreTopicName())
	return nil
}

func isMarkerRecord(record *kgo.Record) bool {
	return len(record.Headers) == 1 && record.Headers[0].Key == markerKeyString
}

func toTopicPartitions(topic string, partitions ...int32) []TopicPartition {
	tps := make([]TopicPartition, len(partitions))
	for i, p := range partitions {
		tps[i] = ntp(p, topic)
	}
	return tps
}
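// Illustrative sketch (not part of the original file): wiring SimpleCluster into NewClient and using
// TopicPartitionSet, both declared above. The broker address and topic name are hypothetical
// placeholders, and error handling is kept minimal for brevity.
func ExampleClusterAndTopicPartitionSet() {
	cluster := SimpleCluster([]string{"127.0.0.1:9092"}) // hypothetical local broker
	client, err := NewClient(cluster, kgo.ConsumeTopics("example-topic"))
	if err != nil {
		log.Errorf("could not create client: %v", err)
		return
	}
	defer client.Close()

	// TopicPartitionSet is not thread-safe; guard it yourself if shared across goroutines.
	assigned := NewTopicPartitionSet()
	assigned.Insert(ntp(0, "example-topic"))
	assigned.Insert(ntp(1, "example-topic"))
	log.Infof("currently tracking: %+v", assigned.Items())
}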
317