repo_name: string (length 1-52)
repo_creator: string (6 distinct values)
programming_language: string (4 distinct values)
code: string (length 0-9.68M)
num_lines: int64 (1-234k)
eks-distro
aws
Go
package projects import ( "bufio" "fmt" "io" "os" "os/exec" "path/filepath" "strconv" "strings" "github.com/aws/eks-distro/cmd/release/utils/projects" ) var ( prevReleaseBranch string nextReleaseBranch string ) // CreateFilesAndDirectories returns the number of files generated. If there was an error, returns -1 func CreateFilesAndDirectories(prevReleaseBranchInput string, nextReleaseBranchInput string) (int, error) { prevReleaseBranch, nextReleaseBranch = prevReleaseBranchInput, nextReleaseBranchInput eksdProjects, err := projects.GetProjects() if err != nil { return -1, fmt.Errorf("getting projects: %w", err) } for _, project := range eksdProjects { projectPath := project.GetFilePath() prevReleaseBranchRepoPath := filepath.Join(projectPath, prevReleaseBranch) if _, err = os.Stat(prevReleaseBranchRepoPath); os.IsNotExist(err) { return -1, fmt.Errorf("expected %s to exist but it does not", prevReleaseBranchRepoPath) } // Create new release branch directory at projects/<org>/<repo>/<release_branch> nextReleaseBranchRepoPath := filepath.Join(projectPath, nextReleaseBranch) if err = os.Mkdir(nextReleaseBranchRepoPath, 0755); err != nil { return -1, fmt.Errorf("creating new directory %s: %w", nextReleaseBranchRepoPath, err) } // Copy all files and directories from the latest release branch to new one err = copyDir(prevReleaseBranchRepoPath, nextReleaseBranchRepoPath) if err != nil { return -1, fmt.Errorf("copying directories and files: %w", err) } } // Validate results prevReleaseBranchFileCount, err := getFileCount(prevReleaseBranch) if err != nil { return -1, fmt.Errorf("getting previous release branch project file count: %w", err) } nextReleaseBranchFileCount, err := getFileCount(nextReleaseBranch) if err != nil { return -1, fmt.Errorf("getting next release branch project file count: %w", err) } if prevReleaseBranchFileCount != nextReleaseBranchFileCount { return -1, fmt.Errorf("expected previous release branch file count (%d) to match next release branch file count (%d)", prevReleaseBranchFileCount, nextReleaseBranchFileCount) } return nextReleaseBranchFileCount, nil } func copyDir(prevReleaseBranchPath, nextReleaseBranchPath string) error { prevReleaseBranchDirs, err := os.ReadDir(prevReleaseBranchPath) if err != nil { return fmt.Errorf("reading directory at path %s: %w", prevReleaseBranchPath, err) } else if len(prevReleaseBranchDirs) == 0 { return nil } for _, prevReleaseBranchChildDir := range prevReleaseBranchDirs { prevReleaseBranchChildPath := filepath.Join(prevReleaseBranchPath, prevReleaseBranchChildDir.Name()) nextReleaseBranchDirPath := filepath.Join(nextReleaseBranchPath, prevReleaseBranchChildDir.Name()) if prevReleaseBranchChildDir.IsDir() { if err = os.Mkdir(nextReleaseBranchDirPath, 0755); err != nil { return fmt.Errorf("creating new directory to copy %s: %w", nextReleaseBranchDirPath, err) } if err = copyDir(prevReleaseBranchChildPath, nextReleaseBranchDirPath); err != nil { return fmt.Errorf("copying child directory %s: %w", prevReleaseBranchChildPath, err) } } else { if err = copyFile(prevReleaseBranchChildPath, nextReleaseBranchDirPath); err != nil { return fmt.Errorf("copying file: %w", err) } } } return nil } func copyFile(prevReleaseBranchFilePath, nextReleaseBranchFilePath string) error { existingFile, err := os.Open(prevReleaseBranchFilePath) if err != nil { return fmt.Errorf("opening file %s to copy: %w", prevReleaseBranchFilePath, err) } defer existingFile.Close() newFile, err := os.Create(nextReleaseBranchFilePath) if err != nil { return fmt.Errorf("creating file %s to write to as a copy: %w", nextReleaseBranchFilePath, err) } defer newFile.Close() if filepath.Base(existingFile.Name()) == "CHECKSUMS" { scanner := bufio.NewScanner(existingFile) for scanner.Scan() { updatedLine := strings.ReplaceAll(scanner.Text(), prevReleaseBranch, nextReleaseBranch) if _, err = fmt.Fprintln(newFile, updatedLine); err != nil { return fmt.Errorf("modifying CHECKSUMS file: %w", err) } } if err = scanner.Err(); err != nil { return fmt.Errorf("scanning CHECKSUMS: %w", err) } } else { if _, err = io.Copy(newFile, existingFile); err != nil { return fmt.Errorf("copying files: %w", err) } } return nil } func getFileCount(releaseBranch string) (int, error) { pathPattern := filepath.Join(projects.GetProjectPathRoot(), "*", "*", releaseBranch, "*") out, err := exec.Command("bash", "-c", fmt.Sprintf("find %s -type f | wc -l", pathPattern)).Output() if err != nil { return -1, fmt.Errorf("getting file count for %s: %w", releaseBranch, err) } count, err := strconv.Atoi(strings.TrimSpace(string(out))) if err != nil { return -1, fmt.Errorf("converting file count %v to int: %w", out, err) } return count, nil }
134
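The getFileCount helper above shells out to bash (find ... -type f | wc -l) to validate the copy. As a point of comparison, here is a minimal shell-free sketch of the same count using only the standard library; the countReleaseBranchFiles name is hypothetical and the function assumes the same projects/<org>/<repo>/<release_branch> layout.

package projects

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// countReleaseBranchFiles is a hypothetical, shell-free counterpart to getFileCount:
// it globs projects/<org>/<repo>/<releaseBranch> and counts regular files under each
// matching directory, mirroring `find <pattern> -type f | wc -l` without invoking bash.
func countReleaseBranchFiles(projectPathRoot, releaseBranch string) (int, error) {
	branchDirs, err := filepath.Glob(filepath.Join(projectPathRoot, "*", "*", releaseBranch))
	if err != nil {
		return -1, fmt.Errorf("globbing release branch directories: %w", err)
	}
	count := 0
	for _, dir := range branchDirs {
		walkErr := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			if !d.IsDir() {
				count++
			}
			return nil
		})
		if walkErr != nil {
			return -1, fmt.Errorf("walking %s: %w", dir, walkErr)
		}
	}
	return count, nil
}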
eks-distro
aws
Go
package main import ( "flag" "fmt" "log" "os" "strconv" "github.com/aws/eks-distro/cmd/release/utils/changetype" "github.com/aws/eks-distro/cmd/release/utils/git" "github.com/aws/eks-distro/cmd/release/utils/values" ) // Updates RELEASE number for dev or prod, depending on the values provided to the appropriate flags. // TODO: fix all logic around undoing changes if error. func main() { branch := flag.String("branch", "", "Release branch, e.g. 1-20") isProd := flag.Bool("isProd", false, "True for prod; false for dev") flag.Parse() environment := func() changetype.ChangeType { if *isProd { return changetype.Prod } return changetype.Dev }() nextNumber, numberFilePath, err := getNextNumber(*branch, environment) if err != nil { log.Fatalf("calculating %s RELEASE: %v", environment, err) } gm, err := git.CreateGitManager(*branch, nextNumber, environment) if err != nil { log.Fatalf("creating git manager for %s RELEASE: %v", environment, err) } if err = updateEnvironmentReleaseNumber(nextNumber, numberFilePath); err != nil { cleanUpErrs := gm.RestoreFileAndAbandonAllChanges(numberFilePath) if len(cleanUpErrs) > 0 { log.Printf("encountered %d error(s) while attempting to clean up due to earlier error: %v", len(cleanUpErrs), cleanUpErrs) } log.Fatalf("writing to %s RELEASE: %v", environment, err) } if err = gm.AddAndCommit(numberFilePath); err != nil { cleanUpErrs := gm.RestoreFileAndAbandonAllChanges(numberFilePath) if len(cleanUpErrs) > 0 { log.Printf("encountered %d error(s) while attempting to clean up due to earlier error: %v", len(cleanUpErrs), cleanUpErrs) } log.Fatalf("adding and committing: %v", err) } if err = gm.OpenPR(); err != nil { log.Fatalf("opening PR: %v", err) } } // getNextNumber returns the next number and the filepath to the local file for the current number used to determine // the next number. func getNextNumber(branch string, ct changetype.ChangeType) (nextNum string, numPath values.AbsolutePath, err error) { currNum, numPath, err := values.GetLocalNumber(branch, ct) if err != nil { return "", "", fmt.Errorf("getting local number: %w", err) } currNumAsInt, err := strconv.Atoi(currNum) if err != nil { return "", "", fmt.Errorf("calculating next number from current number %s: %w", currNum, err) } return strconv.Itoa(currNumAsInt + 1), numPath, nil } func updateEnvironmentReleaseNumber(number string, numberFilePath values.AbsolutePath) error { if len(number) == 0 { return fmt.Errorf("updating release number file %s because provided number was empty", numberFilePath) } return os.WriteFile(numberFilePath.String(), []byte(number+"\n"), 0644) }
84
eks-distro
aws
Go
package changetype import "fmt" type ChangeType string const ( Dev ChangeType = "development" Prod ChangeType = "production" Docs ChangeType = "docs" GHRelease ChangeType = "gh_release" ) func (ct ChangeType) String() string { return string(ct) } func (ct ChangeType) IsDevOrProd() bool { return ct == Dev || ct == Prod } func (ct ChangeType) GetDescription(version string) (string, error) { if ct.IsDevOrProd() { return fmt.Sprintf("Bumped %s release number for %s", ct.String(), version), nil } else if ct == Docs { return "Created and updated docs for " + version, nil } else { return "", fmt.Errorf("unknown ChangeType %v", ct) } }
31
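A small usage sketch of the ChangeType helpers defined above; the version string is only an illustrative value.

package main

import (
	"fmt"
	"log"

	"github.com/aws/eks-distro/cmd/release/utils/changetype"
)

func main() {
	for _, ct := range []changetype.ChangeType{changetype.Prod, changetype.Docs} {
		desc, err := ct.GetDescription("v1-23-eks-1")
		if err != nil {
			log.Fatalf("getting description: %v", err)
		}
		// production -> Bumped production release number for v1-23-eks-1
		// docs -> Created and updated docs for v1-23-eks-1
		fmt.Printf("%s -> %s\n", ct, desc)
	}
}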
eks-distro
aws
Go
package git import ( "fmt" "io" "os" "os/exec" "path/filepath" "github.com/aws/eks-distro/cmd/release/utils/values" ) var ( prScriptPathFromRoot = filepath.Join(values.GetGitRootDirectory(), "cmd/release/utils/git/open-pr.sh") outputStream io.Writer = os.Stdout errStream io.Writer = os.Stderr ) func (gm *Manager) OpenPR() error { if err := gm.currentBranchMustBeChangesBranch(); err != nil { return fmt.Errorf("checking expected branch before opening PR: %w", err) } prTitle, err := gm.ct.GetDescription(gm.version) if err != nil { return fmt.Errorf("getting PR description: %w", err) } cmd := exec.Command("/bin/bash", prScriptPathFromRoot, gm.changesBranch, prTitle) cmd.Stdout = outputStream cmd.Stderr = errStream if err = cmd.Run(); err != nil { if err = gm.abandonChangesBranch(); err != nil { fmt.Printf("checking out original branch after opening PR: %v\n", err) } return fmt.Errorf("opening PR: %v", err) } if err = gm.abandonChangesBranch(); err != nil { return fmt.Errorf("checking out original branch after opening PR: %w", err) } return nil }
46
eks-distro
aws
Go
package git import ( "bytes" "fmt" "os/exec" "strings" "github.com/aws/eks-distro/cmd/release/utils/values" ) const cmdName = "git" var ( baseCmd = []string{"-C", values.GetGitRootDirectory()} add = cmdRunner("add") commit = cmdRunner("commit", "-m") checkoutBranch = cmdRunner("checkout") checkoutNewBranch = cmdRunner("checkout", "-b") deleteBranch = cmdRunner("branch", "-D") restoreFile = cmdRunner("restore") restoreStagedFile = cmdRunner("restore", "--staged") showCurrentBranch = cmdOutput("branch", "--show-current") ) func cmdOutput(gitArgs ...string) func() ([]byte, error) { stdCmd := append(baseCmd, gitArgs...) return func() ([]byte, error) { output, err := exec.Command(cmdName, stdCmd...).Output() if err != nil { return []byte{}, fmt.Errorf("running %s\n%w\n%s\n", fmt.Sprintf("%s %s", cmdName, strings.Join(stdCmd, " ")), err, output) } return bytes.TrimSpace(output), nil } } func cmdRunner(gitArgs ...string) func(string) error { stdCmd := append(baseCmd, gitArgs...) return func(additionalArg string) error { allArgs := append(stdCmd, additionalArg) cmd := exec.Command(cmdName, allArgs...) output, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("running %q\n%w\n%s", strings.Join(cmd.Args, " "), err, output) } return nil } }
52
eks-distro
aws
Go
package projects import ( "fmt" "os" "path/filepath" "github.com/aws/eks-distro/cmd/release/utils/values" ) var projectPathRoot = filepath.Join(values.GetGitRootDirectory(), "projects") type Project struct { org string repo string } func GetProjects() ([]Project, error) { orgDirs, err := os.ReadDir(projectPathRoot) if err != nil { return []Project{}, fmt.Errorf("reading projects path: %w", err) } var projects []Project // Iterate through projects/<org> for _, orgDir := range orgDirs { if !orgDir.IsDir() { continue } repoDirs, err := os.ReadDir(filepath.Join(projectPathRoot, orgDir.Name())) if err != nil { return []Project{}, fmt.Errorf("reading repos paths: %w", err) } // Iterate through projects/<org>/<repo> for _, repoDir := range repoDirs { if repoDir.IsDir() { projects = append(projects, Project{org: orgDir.Name(), repo: repoDir.Name()}) } } } return projects, nil } func (p *Project) GetFilePath() string { return filepath.Join(projectPathRoot, p.org, p.repo) } func (p *Project) GetRepo() string { return p.repo } func (p *Project) GetOrg() string { return p.org } func (p *Project) GetGitHubURL() string { return fmt.Sprintf("https://github.com/%s/%s", p.GetOrg(), p.GetRepo()) } func (p *Project) GetVersion(releaseBranch string) (Version, error) { releaseBranchPath := filepath.Join(p.GetFilePath(), releaseBranch) gitTagVersion, err := readGitTagVersionFile(releaseBranchPath) if err != nil { return Version{}, fmt.Errorf("getting GitTag version: %w", err) } golangVersion, err := readGolangVersionFile(releaseBranchPath) if err != nil { return Version{}, fmt.Errorf("getting Golang version: %w", err) } return Version{ gitTag: string(gitTagVersion), golang: string(golangVersion), }, nil } func GetProjectPathRoot() string { return projectPathRoot }
79
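A brief sketch of how GetProjects, GetVersion, and the getters compose. It assumes it is run from within the eks-distro repository and that each project has a 1-24 release branch directory containing GIT_TAG and GOLANG_VERSION files; both the branch and the output are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/aws/eks-distro/cmd/release/utils/projects"
)

func main() {
	eksdProjects, err := projects.GetProjects()
	if err != nil {
		log.Fatalf("getting projects: %v", err)
	}
	for _, p := range eksdProjects {
		// Assumes projects/<org>/<repo>/1-24 exists with GIT_TAG and GOLANG_VERSION files.
		v, err := p.GetVersion("1-24")
		if err != nil {
			log.Fatalf("getting version for %s: %v", p.GetRepo(), err)
		}
		fmt.Printf("%s %s (go %s) %s\n", p.GetRepo(), v.GetGitTag(), v.GetGolang(), p.GetGitHubURL())
	}
}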
eks-distro
aws
Go
package projects import ( "bytes" "fmt" "os" "path/filepath" ) const ( gitTagFilename = "GIT_TAG" golangFilename = "GOLANG_VERSION" ) type Version struct { gitTag string golang string } func (v *Version) GetGitTag() string { return v.gitTag } func (v *Version) GetGolang() string { return v.golang } func readGitTagVersionFile(parentPath string) ([]byte, error) { return readVersionFile(filepath.Join(parentPath, gitTagFilename)) } func readGolangVersionFile(parentPath string) ([]byte, error) { return readVersionFile(filepath.Join(parentPath, golangFilename)) } func readVersionFile(versionFilepath string) ([]byte, error) { fileOutput, err := os.ReadFile(versionFilepath) if err != nil { return []byte{}, fmt.Errorf("reading version at %s path:%w", versionFilepath, err) } return bytes.TrimSpace(fileOutput), nil }
43
eks-distro
aws
Go
package release import ( "fmt" "github.com/aws/eks-distro/cmd/release/utils/changetype" ) const ( MinNumber = 0 InvalidNumberUpperLimit = MinNumber - 1 ) type Release struct { branch string number string kubernetesGitTag string // e.g. v1.23.1 tag string // e.g. v1-23-eks-1 manifestURL string // e.g. https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-1.yaml } // NewRelease returns a complete Release based on the provided release branch rb. The provided ct cannot be dev or prod. func NewRelease(rb string, ct changetype.ChangeType) (*Release, error) { if ct.IsDevOrProd() { return &Release{}, fmt.Errorf("release cannot be for prod or dev, as it assumes these changes are done") } r, err := newRelease(rb, "", false) if err != nil { return &Release{}, fmt.Errorf("creating release values: %w", err) } return r, nil } // NewReleaseOverrideNumber pities the fool who trifles with it. // This function disregards the usual process of getting the release number from the local environment (like NewRelease // does) and instead uses the provided overrideNumber value to produce the number and other number-dependent values in // the returned release. This can cause many unexpected problems, so this function must be used with the utmost caution. // Unless there is a VERY specific reason to use this function, NewRelease should be used instead. func NewReleaseOverrideNumber(rb string, overrideNumber string) (*Release, error) { r, err := newRelease(rb, overrideNumber, true) if err != nil { return &Release{}, fmt.Errorf("creating release values with override number: %w", err) } return r, nil } // Branch returns the release branch. Example: 1-23 func (r *Release) Branch() string { return r.branch } // Number returns the release number. Example: 1 func (r *Release) Number() string { return r.number } // KubernetesGitTag returns the full Kubernetes version. Example: v1.23.7 func (r *Release) KubernetesGitTag() string { return r.kubernetesGitTag } // KubernetesURL returns the URL to the Kubernetes version release. Example: https://github.com/kubernetes/kubernetes/release/tag/v1.23.7 func (r *Release) KubernetesURL() string { return fmt.Sprintf("https://github.com/kubernetes/kubernetes/release/tag/%s", r.kubernetesGitTag) } // KubernetesMinorVersion returns the minor version. Example: 1.23 func (r *Release) KubernetesMinorVersion() string { return r.kubernetesGitTag[1:5] } // Tag returns v<branch>-eks-<number>. Example: v1-23-eks-1 func (r *Release) Tag() string { return r.tag } // ManifestURL returns the manifest URL. Example: https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-1.yaml func (r *Release) ManifestURL() string { return r.manifestURL }
83
eks-distro
aws
Go
package release import ( "fmt" "strconv" "strings" "github.com/aws/eks-distro/cmd/release/utils/changetype" "github.com/aws/eks-distro/cmd/release/utils/values" ) const defaultReleaseEnv = changetype.Prod func newRelease(releaseBranchInput string, overrideNumInput string, hasOverrideNum bool) (*Release, error) { rb, num, err := generateReleaseInput(releaseBranchInput, overrideNumInput, hasOverrideNum) if err != nil { if hasOverrideNum { return &Release{}, fmt.Errorf("creating release input with override number: %w", err) } return &Release{}, fmt.Errorf("creating release input: %w", err) } k8sGitTag, err := values.GetGitTag("kubernetes", "kubernetes", rb) if err != nil { return &Release{}, fmt.Errorf("getting Kubernetes Git Tag: %w", err) } return &Release{ branch: rb, number: num, kubernetesGitTag: string(k8sGitTag), tag: fmt.Sprintf("v%s-eks-%s", rb, num), manifestURL: fmt.Sprintf("https://distro.eks.amazonaws.com/kubernetes-%s/kubernetes-%s-eks-%s.yaml", rb, rb, num), }, nil } // generateReleaseInput returns the validated release branch and release number derived from the provided inputs. func generateReleaseInput(releaseBranchInput string, overrideNumInput string, hasOverrideNum bool) (string, string, error) { overrideNum := strings.TrimSpace(overrideNumInput) if hasOverrideNum && len(overrideNum) == 0 { return "", "", fmt.Errorf("expected non-empty override number") } releaseBranch := strings.TrimSpace(releaseBranchInput) if len(releaseBranch) == 0 { return "", "", fmt.Errorf("release branch cannot be an empty string") } // Release Branch must be a supported release branch if isSupported, err := values.IsSupportedReleaseBranch(releaseBranch); !isSupported || err != nil { if err != nil { return "", "", fmt.Errorf("checking if supported release branch: %w", err) } return "", "", fmt.Errorf("branch %s is not a supported release branch", releaseBranch) } // If provided, the override number must be between MinNumber and the local number (inclusive). If no override // number is provided, the local number is used. var number string localNum, _, err := values.GetLocalNumber(releaseBranch, defaultReleaseEnv) if err != nil { return "", "", fmt.Errorf("determining number: %w", err) } if hasOverrideNum { if strings.Compare(strconv.Itoa(MinNumber), overrideNum) == 1 || strings.Compare(overrideNum, localNum) == 1 { return "", "", fmt.Errorf("override number %s must be between min number %d and local number %s (inclusive)", overrideNum, MinNumber, localNum) } number = overrideNum } else { number = localNum } return releaseBranch, number, nil }
77
eks-distro
aws
Go
package release import ( "reflect" "testing" "github.com/aws/eks-distro/cmd/release/utils/changetype" ) const ( validCT = changetype.Docs validReleaseBranch = "1-24" // update when 1-24 no longer supported // Only valid if Release was made with override number validOverrideNumber = "1" validKubernetesGitTag = "v1.24.6" validTag = "v1-24-eks-1" validManifestURL = "https://distro.eks.amazonaws.com/kubernetes-1-24/kubernetes-1-24-eks-1.yaml" validKubernetesMinorVersion = "1.24" validKubernetesURL = "https://github.com/kubernetes/kubernetes/release/tag/" + validKubernetesGitTag ) var ( // Only valid if Release was made with override number testFields = fields{ branch: validReleaseBranch, number: validOverrideNumber, kubernetesGitTag: validKubernetesGitTag, tag: validTag, manifestURL: validManifestURL, } ) type fields struct { branch string number string kubernetesGitTag string tag string manifestURL string } func TestNewRelease(t *testing.T) { type args struct { rb string ct changetype.ChangeType } tests := []struct { name string args args want *Release wantErr bool errMsgContains string }{ { name: "error_if_empty_release_branch", args: args{rb: "", ct: validCT}, want: &Release{}, wantErr: true, errMsgContains: "release branch cannot be an empty string", }, { name: "error_if_invalid_release_branch", args: args{rb: "foo", ct: validCT}, want: &Release{}, wantErr: true, errMsgContains: "release branch cannot be an empty string", }, { name: "error_if_change_type_is_dev", args: args{rb: validReleaseBranch, ct: changetype.Dev}, want: &Release{}, wantErr: true, errMsgContains: "release cannot be for prod or dev", }, { name: "error_if_change_type_is_prod", args: args{rb: validReleaseBranch, ct: changetype.Prod}, want: &Release{}, wantErr: true, errMsgContains: "release cannot be for prod or dev", }, // TODO add test for no errors } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewRelease(tt.args.rb, tt.args.ct) if (err != nil) != tt.wantErr { t.Errorf("NewRelease() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("NewRelease() got = %v, want %v", got, tt.want) } }) } } func TestNewReleaseOverrideNumber(t *testing.T) { type args struct { rb string overrideNumber string } tests := []struct { name string args args want *Release wantErr bool errMsgContains string }{ { name: "error_if_empty_release_branch", args: args{rb: "", overrideNumber: validOverrideNumber}, want: &Release{}, wantErr: true, errMsgContains: "release branch cannot be an empty string", }, { name: "error_if_invalid_release_branch", args: args{rb: "foo", overrideNumber: validOverrideNumber}, want: &Release{}, wantErr: true, errMsgContains: "release branch cannot be an empty string", }, { name: "error_if_override_number_is_empty", args: args{rb: validReleaseBranch, overrideNumber: ""}, want: &Release{}, wantErr: true, errMsgContains: "expected non-empty override number", }, { name: "error_if_override_number_is_only_whitespace", args: args{rb: validReleaseBranch, overrideNumber: " "}, want: &Release{}, wantErr: true, errMsgContains: "release cannot be for prod or dev", }, { name: "error_if_override_number_is_less_than_min", args: args{rb: validReleaseBranch, overrideNumber: ""}, want: &Release{}, wantErr: true, errMsgContains: "expected non-empty override number", }, { name: "error_if_override_number_is_greater_than_local", args: args{rb: validReleaseBranch, overrideNumber: "9999999999999"}, want: &Release{}, wantErr: true, errMsgContains: "expected non-empty 
override number", }, { name: "returns_release_if_valid_input", args: args{rb: validReleaseBranch, overrideNumber: validOverrideNumber}, want: &Release{ branch: validReleaseBranch, number: validOverrideNumber, kubernetesGitTag: validKubernetesGitTag, tag: validTag, manifestURL: validManifestURL, }, wantErr: false, }, // TODO: add test to confirm override with local number and NewRelease make the same Release } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewReleaseOverrideNumber(tt.args.rb, tt.args.overrideNumber) if (err != nil) != tt.wantErr { t.Errorf("NewReleaseOverrideNumber() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("NewReleaseOverrideNumber() got = %v, want %v", got, tt.want) } }) } } func TestRelease_Branch(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_branch", fields: testFields, want: testFields.branch, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.Branch(); got != tt.want { t.Errorf("Branch() = %v, want %v", got, tt.want) } }) } } func TestRelease_KubernetesGitTag(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_kubernetes_git_tag", fields: testFields, want: testFields.kubernetesGitTag, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.KubernetesGitTag(); got != tt.want { t.Errorf("KubernetesGitTag() = %v, want %v", got, tt.want) } }) } } func TestRelease_KubernetesMinorVersion(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_kubernetes_minor_version", fields: testFields, want: validKubernetesMinorVersion, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.KubernetesMinorVersion(); got != tt.want { t.Errorf("KubernetesMinorVersion() = %v, want %v", got, tt.want) } }) } } func TestRelease_KubernetesURL(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_kubernetes_url", fields: testFields, want: validKubernetesURL, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.KubernetesURL(); got != tt.want { t.Errorf("KubernetesURL() = %v, want %v", got, tt.want) } }) } } func TestRelease_ManifestURL(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_manifest_url", fields: testFields, want: testFields.manifestURL, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.ManifestURL(); got != tt.want { t.Errorf("ManifestURL() = %v, want %v", got, tt.want) } }) } } 
func TestRelease_Number(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_number", fields: testFields, want: testFields.number, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.Number(); got != tt.want { t.Errorf("Number() = %v, want %v", got, tt.want) } }) } } func TestRelease_Tag(t *testing.T) { tests := []struct { name string fields fields want string }{ { name: "returns_release_tag", fields: testFields, want: testFields.tag, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Release{ branch: tt.fields.branch, number: tt.fields.number, kubernetesGitTag: tt.fields.kubernetesGitTag, tag: tt.fields.tag, manifestURL: tt.fields.manifestURL, } if got := r.Tag(); got != tt.want { t.Errorf("Tag() = %v, want %v", got, tt.want) } }) } }
375
eks-distro
aws
Go
package values import ( "bytes" "fmt" "io" "net/http" "regexp" "sort" "strings" ) const ( ecrBase = "public.ecr.aws/eks-distro" expectedStatusCode = 200 ) type component struct { name, version, uri []byte } func GetComponentsFromReleaseManifest(releaseManifestURL string) (string, error) { resp, err := http.Get(releaseManifestURL) if err != nil { return "", fmt.Errorf("getting release manifest: %w\n", err) } defer resp.Body.Close() if resp.StatusCode != expectedStatusCode { return "", fmt.Errorf("got status code %v when getting release manifest (expected %d)", resp.StatusCode, expectedStatusCode) } body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("reading release manifest: %w", err) } re := regexp.MustCompile(fmt.Sprintf(`uri: (%s.*)`, ecrBase)) foundMatches := re.FindAllSubmatch(body, -1) var components []component // Example uri value and what it creates from this: // uri: public.ecr.aws/eks-distro/etcd-io/etcd:v3.4.14-eks-1-18-5 captureRegex := regexp.MustCompile(`[^/]+:v[0-9.]+`) for _, matchPair := range foundMatches { uri := matchPair[1] nameAndVersion := bytes.Split(captureRegex.Find(uri), []byte(":")) components = append(components, component{ name: nameAndVersion[0], version: nameAndVersion[1], uri: uri, }) } uriEndingAndReleaseBranchRegexp := regexp.MustCompile(`-eks-(.*)-[0-9]+$`) // ["-eks-1-23-444" "1-23"] uriEndingAndReleaseBranch := uriEndingAndReleaseBranchRegexp.FindSubmatch(components[0].uri) uriEnding, releaseBranch := uriEndingAndReleaseBranch[0], uriEndingAndReleaseBranch[1] assetsNotInReleaseManifest := [][]byte{[]byte("go-runner"), []byte("kube-proxy-base")} kubernetesReleaseGitTag, err := GetGitTag("kubernetes", "release", string(releaseBranch)) if err != nil { return "", fmt.Errorf("getting Kubernetes git tag for release manifest: %w", err) } for _, asset := range assetsNotInReleaseManifest { components = append(components, component{ name: asset, version: kubernetesReleaseGitTag, uri: []byte(fmt.Sprintf("%s/kubernetes/%s:%s%s", ecrBase, asset, kubernetesReleaseGitTag, uriEnding)), }) } var tableRows []string for _, c := range components { tableRows = append(tableRows, fmt.Sprintf("| %s | %s | %s |", c.name, c.version, c.uri)) } sort.Strings(tableRows) tableRows = append([]string{"| Name | Version | URI |", "|------|---------|-----|"}, tableRows...) return strings.Join(tableRows, "\n"), nil }
81
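The manifest parsing above hinges on two regular expressions: one that captures name:version from the last path segment of each uri, and one that pulls the -eks-<branch>-<number> suffix used to rebuild URIs for the assets missing from the manifest (go-runner, kube-proxy-base). A standalone sketch of just those two steps, using the example uri from the code comment above.

package main

import (
	"bytes"
	"fmt"
	"regexp"
)

func main() {
	// Example uri value from a release manifest, as in the code comment above.
	uri := []byte("public.ecr.aws/eks-distro/etcd-io/etcd:v3.4.14-eks-1-18-5")

	// Same capture pattern as GetComponentsFromReleaseManifest: last path segment plus ":v<version>".
	captureRegex := regexp.MustCompile(`[^/]+:v[0-9.]+`)
	nameAndVersion := bytes.Split(captureRegex.Find(uri), []byte(":"))
	fmt.Printf("name=%s version=%s\n", nameAndVersion[0], nameAndVersion[1]) // name=etcd version=v3.4.14

	// Same suffix extraction used to rebuild URIs for assets not listed in the manifest.
	suffixRegex := regexp.MustCompile(`-eks-(.*)-[0-9]+$`)
	m := suffixRegex.FindSubmatch(uri)
	fmt.Printf("suffix=%s releaseBranch=%s\n", m[0], m[1]) // suffix=-eks-1-18-5 releaseBranch=1-18
}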
eks-distro
aws
Go
package values import ( "strings" "testing" ) func TestGetComponentsFromReleaseManifest(t *testing.T) { type args struct { url string } tests := []struct { name string args args want string wantErr bool errMsgContains string }{ { name: "error_if_invalid_URL", args: args{url: "Hello!_I'm_an_invalid_URL"}, want: "", wantErr: true, }, { name: "error_if_nonexistent_URL", args: args{url: "https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-FOOOO.yaml"}, want: "", wantErr: true, errMsgContains: "status code 403", }, { name: "return_expected_if_valid_URL", args: args{url: validDataOne.url}, want: validDataOne.expectedOutput, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := GetComponentsFromReleaseManifest(tt.args.url) if err != nil { if !tt.wantErr { t.Errorf("GetComponentsFromReleaseManifest() error = %v, wantErr %v", err, tt.wantErr) return } if len(tt.errMsgContains) > 0 && !strings.Contains(err.Error(), tt.errMsgContains) { t.Errorf("GetComponentsFromReleaseManifest() got = %q, expected error message to contain %q", err, tt.errMsgContains) return } } if got != tt.want { t.Errorf("GetComponentsFromReleaseManifest() got = %v, want %v", got, tt.want) } }) } } type validData struct { url string expectedOutput string } var validDataOne = validData{ url: "https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-1.yaml", expectedOutput: `| Name | Version | URI | |------|---------|-----| | aws-iam-authenticator | v0.5.8 | public.ecr.aws/eks-distro/kubernetes-sigs/aws-iam-authenticator:v0.5.8-eks-1-23-1 | | coredns | v1.8.7 | public.ecr.aws/eks-distro/coredns/coredns:v1.8.7-eks-1-23-1 | | csi-snapshotter | v5.0.1 | public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter:v5.0.1-eks-1-23-1 | | etcd | v3.5.3 | public.ecr.aws/eks-distro/etcd-io/etcd:v3.5.3-eks-1-23-1 | | external-attacher | v3.4.0 | public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v3.4.0-eks-1-23-1 | | external-provisioner | v3.1.0 | public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v3.1.0-eks-1-23-1 | | external-resizer | v1.4.0 | public.ecr.aws/eks-distro/kubernetes-csi/external-resizer:v1.4.0-eks-1-23-1 | | go-runner | v0.13.0 | public.ecr.aws/eks-distro/kubernetes/go-runner:v0.13.0-eks-1-23-1 | | kube-apiserver | v1.23.6 | public.ecr.aws/eks-distro/kubernetes/kube-apiserver:v1.23.6-eks-1-23-1 | | kube-controller-manager | v1.23.6 | public.ecr.aws/eks-distro/kubernetes/kube-controller-manager:v1.23.6-eks-1-23-1 | | kube-proxy | v1.23.6 | public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.6-eks-1-23-1 | | kube-proxy-base | v0.13.0 | public.ecr.aws/eks-distro/kubernetes/kube-proxy-base:v0.13.0-eks-1-23-1 | | kube-scheduler | v1.23.6 | public.ecr.aws/eks-distro/kubernetes/kube-scheduler:v1.23.6-eks-1-23-1 | | livenessprobe | v2.7.0 | public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.7.0-eks-1-23-1 | | metrics-server | v0.6.1 | public.ecr.aws/eks-distro/kubernetes-sigs/metrics-server:v0.6.1-eks-1-23-1 | | node-driver-registrar | v2.5.0 | public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.5.0-eks-1-23-1 | | pause | v1.23.6 | public.ecr.aws/eks-distro/kubernetes/pause:v1.23.6-eks-1-23-1 | | snapshot-controller | v5.0.1 | public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/snapshot-controller:v5.0.1-eks-1-23-1 | | snapshot-validation-webhook | v5.0.1 | public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/snapshot-validation-webhook:v5.0.1-eks-1-23-1 |`, }
89
eks-distro
aws
Go
package values import ( "fmt" "os" "os/exec" "strings" ) // If an error is encountered while assigning the value to gitRootDirectory, the program is terminated by panic(). // This heavy-handed response is because so much of the program depends on the gitRootDirectory value. If this value is // missing, there is no reason to proceed. The program simply will not be successful without it. var gitRootDirectory = func() string { gitRootOutput, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() if err != nil { panic(fmt.Sprintf("Unable to get git root directory: %v", err)) } return strings.Join(strings.Fields(string(gitRootOutput)), "") }() // GetGitRootDirectory returns path to the project's root. There is no trailing "/". // Example: /Users/lovelace/go/eks-distro func GetGitRootDirectory() string { return gitRootDirectory } // NewDirectory should not be created directly and instead should only be generated by calling MakeNewDirectory. type NewDirectory struct { ap AbsolutePath } // MakeNewDirectory creates a new directory at provided ap. The provided ap cannot be a path to an existing directory. func MakeNewDirectory(ap AbsolutePath) (*NewDirectory, error) { if err := os.Mkdir(ap.String(), 0777); err != nil { return nil, fmt.Errorf("creating release docs directory: %w", err) } return &NewDirectory{ap}, nil } // String returns the absolute path value as a string. func (nd *NewDirectory) String() string { return nd.ap.String() } // RemoveNewDirectory delete the directory and the associated path value in nd. func (nd *NewDirectory) RemoveNewDirectory() error { if err := os.RemoveAll(nd.String()); err != nil { return fmt.Errorf("deleting directory %s: %w", nd.String(), err) } nd.ap = "" return nil } // StripRootDirectory returns the absolute filepath with the root directory path removed. func (nd *NewDirectory) StripRootDirectory() string { return nd.ap.StripRootDirectory() }
58
eks-distro
aws
Go
package values import "fmt" const ( IndexFileName = "index.md" ReleaseAnnouncementFileName = "release-announcement.txt" ) type ReleaseTag interface { Tag() string } func GetChangelogFileName(rt ReleaseTag) string { return fmt.Sprintf("CHANGELOG-%s.md", rt.Tag()) }
17
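GetChangelogFileName only needs a value satisfying the small ReleaseTag interface; in practice that is *Release via its Tag() method, but any type with Tag() works. A minimal sketch with a hypothetical stand-in type, assuming it runs inside the eks-distro repository.

package main

import (
	"fmt"

	"github.com/aws/eks-distro/cmd/release/utils/values"
)

// stubTag is a hypothetical stand-in that satisfies values.ReleaseTag,
// the same way *release.Release does via its Tag() method.
type stubTag struct{ tag string }

func (s stubTag) Tag() string { return s.tag }

func main() {
	fmt.Println(values.GetChangelogFileName(stubTag{tag: "v1-23-eks-1"})) // CHANGELOG-v1-23-eks-1.md
}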
eks-distro
aws
Go
package values import ( "path/filepath" "strings" ) var ( IndexPath = getAbsolutePath("docs", "contents", IndexFileName) ReadmePath = getAbsolutePath("README.md") docsLocalPath = filepath.Join("docs", "contents", "releases") ) type PathValues interface { Branch() string Number() string } // GetReleaseBranchDocsDirectory returns the expected and absolute filepath for the release branch directory for the // provided PathValues. There is no guarantee this directory actually exists. The filepath is simply where it should // exist. // Example: ~/go/eks-distro/docs/contents/releases/1-24 func GetReleaseBranchDocsDirectory(pv PathValues) AbsolutePath { return getAbsolutePath(docsLocalPath, pv.Branch()) } // GetReleaseDocsDirectory returns the expected and absolute filepath for the release doc directory for the provided // PathValues. There is no guarantee this directory actually exists. The filepath is simply where it should exist. // Example: ~/go/eks-distro/docs/contents/releases/1-24/1 func GetReleaseDocsDirectory(pv PathValues) AbsolutePath { return getAbsolutePath(docsLocalPath, pv.Branch(), pv.Number()) } func getGitTagPath(projectOrg, projectName, releaseBranch string) AbsolutePath { return getAbsolutePath("projects", projectOrg, projectName, releaseBranch, "GIT_TAG") } func getNumberPath(branch, changeTypeString string) AbsolutePath { return getAbsolutePath("release", branch, changeTypeString, "RELEASE") } type AbsolutePath string func (ap AbsolutePath) String() string { return string(ap) } // StripRootDirectory returns the absolute file path with the root directory path removed. func (ap AbsolutePath) StripRootDirectory() string { return strings.TrimPrefix(ap.String(), GetGitRootDirectory()+"/") } func getAbsolutePath(parentDirs ...string) AbsolutePath { return AbsolutePath(filepath.Join(GetGitRootDirectory(), filepath.Join(parentDirs...))) }
56
eks-distro
aws
Go
package values import ( "bytes" "fmt" "os" "strings" "github.com/aws/eks-distro/cmd/release/utils/changetype" ) // GetLocalNumber returns the current number and the filepath to the local file used to determine that number for the // provided branch and ct. The returned num only reflects the local environment and may not match the upstream source's // current number for the branch or ct. Pulling down upstream changes before using this function is highly recommended. // Provided branch must exist, the provided ct must be Dev or Prod, and the release number file must exist in the // expected location (e.g., /Users/lovelace/go/eks-distro/release/1-24/development/RELEASE) func GetLocalNumber(branch string, ct changetype.ChangeType) (num string, numPath AbsolutePath, err error) { numPath = getNumberPath(branch, ct.String()) fileOutput, err := os.ReadFile(numPath.String()) if err != nil { return "", "", fmt.Errorf("reading number from %s file: %w", numPath, err) } return strings.TrimSpace(string(fileOutput)), numPath, nil } func GetGitTag(projectOrg, projectName, releaseBranch string) ([]byte, error) { gitTagPath := getGitTagPath(projectOrg, projectName, releaseBranch) fileOutput, err := os.ReadFile(gitTagPath.String()) if err != nil { return []byte{}, fmt.Errorf("reading git tag at %s path:%w", gitTagPath, err) } return bytes.TrimSpace(fileOutput), nil }
34
eks-distro
aws
Go
package values import ( "bytes" "fmt" "os" "strconv" "strings" ) var ( supportedReleaseBranchesPath = getAbsolutePath("release", "SUPPORTED_RELEASE_BRANCHES").String() defaultReleaseBranchPath = getAbsolutePath("release", "DEFAULT_RELEASE_BRANCH").String() ) func IsDefaultReleaseBranch(providedReleaseBranch string) (bool, error) { fileOutput, err := os.ReadFile(defaultReleaseBranchPath) if err != nil { return false, fmt.Errorf("getting default release branch at %s path:%w", defaultReleaseBranchPath, err) } defaultReleaseBranch := strings.TrimSpace(string(fileOutput)) return strings.Compare(providedReleaseBranch, defaultReleaseBranch) == 0, nil } func IsSupportedReleaseBranch(rb string) (bool, error) { supportedReleaseBranches, err := GetSupportedReleaseBranches() if err != nil { return false, fmt.Errorf("getting release branches to check if %s is supported: %w", rb, err) } providedReleaseBranch := []byte(rb) for _, supportedReleaseBranch := range supportedReleaseBranches { if bytes.Equal(supportedReleaseBranch, providedReleaseBranch) { return true, nil } } return false, nil } func GetSupportedReleaseBranches() ([][]byte, error) { fileOutput, err := os.ReadFile(supportedReleaseBranchesPath) if err != nil { return [][]byte{}, fmt.Errorf("getting supported release branches at %s path:%w", supportedReleaseBranchesPath, err) } return bytes.Split(bytes.TrimSpace(fileOutput), []byte("\n")), nil } func GetSupportedReleaseBranchesStrings() ([]string, error) { rbs, err := GetSupportedReleaseBranches() if err != nil { return []string{}, fmt.Errorf("getting supported release branches before converting to strings: %w", err) } var rbsStrings []string for _, rb := range rbs { rbsStrings = append(rbsStrings, string(rb)) } return rbsStrings, nil } func GetLatestSupportedReleaseBranch() ([]byte, error) { supportedReleaseBranches, err := GetSupportedReleaseBranches() if err != nil { return []byte{}, fmt.Errorf("getting release branches to find latest supported release branch: %w", err) } return supportedReleaseBranches[len(supportedReleaseBranches)-1], nil } // AddNextReleaseBranch returns the added release branch if there was no error adding it to the file func AddNextReleaseBranch() ([]byte, error) { nextReleaseBranch, err := getNextReleaseBranch() if err != nil { return []byte{}, fmt.Errorf("getting next release branch to add to supported: %w", err) } releaseBranches, err := GetSupportedReleaseBranches() if err != nil { return []byte{}, fmt.Errorf("getting supported release branches to add %v: %w", nextReleaseBranch, err) } releaseBranches = append(releaseBranches, nextReleaseBranch) if err = os.WriteFile(supportedReleaseBranchesPath, append(bytes.Join(releaseBranches, []byte("\n")), []byte("\n")...), 0644); err != nil { return []byte{}, fmt.Errorf("writing supported release branches to file: %w", err) } return nextReleaseBranch, nil } func getNextReleaseBranch() ([]byte, error) { latestSupportedReleaseBranch, err := GetLatestSupportedReleaseBranch() if err != nil { return []byte{}, fmt.Errorf("getting latest supported release branch to find next release branch: %w", err) } // latestSupportedReleaseBranch format expected to be 1-XX, e.g. 1-26 prefix := string(latestSupportedReleaseBranch[:2]) latestMinorNum, err := strconv.Atoi(string(latestSupportedReleaseBranch[2:])) if err != nil { return []byte{}, fmt.Errorf("converting the minor release number %q to int: %w", latestSupportedReleaseBranch[2:], err) } return []byte(prefix + strconv.Itoa(latestMinorNum+1)), nil }
102
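getNextReleaseBranch above assumes the 1-XX branch format and bumps the minor segment. A tiny standalone sketch of that arithmetic, detached from the file reads; the bumpReleaseBranch helper is hypothetical.

package main

import (
	"fmt"
	"strconv"
)

// bumpReleaseBranch is a hypothetical standalone version of the arithmetic in
// getNextReleaseBranch: keep the "1-" prefix and increment the minor number.
func bumpReleaseBranch(latest string) (string, error) {
	prefix := latest[:2] // "1-"
	minor, err := strconv.Atoi(latest[2:])
	if err != nil {
		return "", fmt.Errorf("converting minor release number %q to int: %w", latest[2:], err)
	}
	return prefix + strconv.Itoa(minor+1), nil
}

func main() {
	next, _ := bumpReleaseBranch("1-26")
	fmt.Println(next) // 1-27
}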
eks-distro
aws
Go
package values import ( "context" "fmt" "strings" "github.com/google/go-github/v52/github" ) const ( baseQuery = "repo:aws/eks-distro is:pr is:merged" githubTimeFormat = "2006-01-02T15:04:05+00:00" ) func GetChangelogPRs(releaseVersion string, overrideNumber int) (string, error) { githubClient := github.NewClient(nil) ctx := context.Background() opts := &github.SearchOptions{Sort: "updated"} //Get the date of the last document release for the release version prs, _, err := githubClient.Search.Issues(ctx, "is:pr is:merged label:release label:documentation repo:aws/eks-distro label:"+releaseVersion, opts) if err != nil { return "", fmt.Errorf("getting PRs from %v: %w", githubClient, err) } lastDocRelease := githubTimeFormat prevDocRelease := githubTimeFormat if len(prs.Issues) > 0 { //Select the most recent pr from the above query and format the date expected for the go-github client releasePRs, _, err := githubClient.Search.Issues(ctx, "is:pr is:merged label:PROD-release label:"+releaseVersion, opts) if err != nil { return "", fmt.Errorf("get release PRs from %v: %w", githubClient, err) } lastDocRelease = releasePRs.Issues[0].ClosedAt.Format(githubTimeFormat) prevDocRelease = prs.Issues[0].ClosedAt.Format(githubTimeFormat) } else { //With no document releases we need to be a little bit clever to generate unannounced changelogs. //This finds the opts = &github.SearchOptions{Sort: "created", Order: "asc"} prs, _, err := githubClient.Search.Issues(ctx, "is:pr is:merged label:PROD-release label:"+releaseVersion, opts) if err != nil { return "", fmt.Errorf("get PRs from %v: %w", githubClient, err) } if overrideNumber == 1 { lastDocRelease = prs.Issues[overrideNumber-1].ClosedAt.Format(githubTimeFormat) } else { lastDocRelease = prs.Issues[overrideNumber-1].ClosedAt.Format(githubTimeFormat) prevDocRelease = prs.Issues[overrideNumber-2].ClosedAt.Format(githubTimeFormat) } } patchPRs, _, err := githubClient.Search.Issues(ctx, fmt.Sprintf("%v merged:%v..%v label:patch label:%v", baseQuery, prevDocRelease, lastDocRelease, releaseVersion), opts) if err != nil { return "", fmt.Errorf("getting patch prs: %w", err) } baseImgPRs, _, err := githubClient.Search.Issues(ctx, fmt.Sprintf("%v merged:%v..%v label:base-img-pkg-update", baseQuery, prevDocRelease, lastDocRelease), opts) if err != nil { return "", fmt.Errorf("getting base image prs: %w", err) } versPRs, _, err := githubClient.Search.Issues(ctx, fmt.Sprintf("%v merged:%v..%v label:project label:%v", baseQuery, prevDocRelease, lastDocRelease, releaseVersion), opts) if err != nil { return "", fmt.Errorf("getting project prs: %w", err) } var changelog []string changelog = append(changelog, PRsSinceLastRelease(patchPRs, "### Patches")) changelog = append(changelog, PRsSinceLastRelease(versPRs, "### Projects")) changelog = append(changelog, PRsSinceLastRelease(baseImgPRs, "### Base Image")) return strings.Join(changelog, "\n"), nil } func PRsSinceLastRelease(prs *github.IssuesSearchResult, sectionName string) string { var section []string section = append(section, sectionName) if len(prs.Issues) == 0 { section = append(section, "No changes since last release") } for _, pr := range prs.Issues { section = append(section, fmt.Sprintf("* %v ([%v](%v))", *pr.Title, *pr.Number, *pr.HTMLURL)) } return strings.Join(section, "\n") + "\n" }
93
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Iptables-wrapper tries to detect which iptables mode is being used by the host even when being run from a container. It then updates the iptables commands to point to the right binaries for that mode. Before exiting it re-executes the given command. The process is as follows: 1. Calls `xtables-<mode>-multi` and checks if the kubelet rules exists. It searches for different patterns in the configured rules, trying to match different kubernetes versions, and it uses the results to guess which mode is in use. 2. Updates the alternatives/symlinks to point to the proper binaries for the detected mode. Depending on the OS it uses `update-alternatives`, `alternatives` or it manually creates symlinks. 3. Re-execs the original command received by this binary. We assume this binary has been symlinked to some/all iptables binaries and whatever was received here was intended to be an iptables-* command. If that is not the case and this command is either executed directly or through a symlink that doesn't point to an iptables binary, it will enter an infinite loop, calling itself recursively. It's important to note that this proxy behavior will only happen on the first iptables-* execution. Following invocations will use directly the binaries for the selected mode. */ package main import ( "context" "errors" "fmt" "os" "os/exec" "github.com/kubernetes-sigs/iptables-wrappers/internal/iptables" ) func main() { ctx := context.Background() sbinPath, err := iptables.DetectBinaryDir() if err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err) os.Exit(1) } // We use `xtables-<mode>-multi` binaries by default to inspect the installed rules, // but this can be changed to directly use `iptables-<mode>-save` binaries. mode := iptables.DetectMode(ctx, iptables.NewXtablesMultiInstallation(sbinPath)) // This re-executes the exact same command passed to this program binaryPath := os.Args[0] var args []string if len(os.Args) > 1 { args = os.Args[1:] } selector := iptables.BuildAlternativeSelector(sbinPath) if err := selector.UseMode(ctx, mode); err != nil { fmt.Fprintf(os.Stderr, "Unable to redirect iptables binaries. (Are you running in an unprivileged pod?): %s\n", err) // fake it, though this will probably also fail if they aren't root binaryPath = iptables.XtablesPath(sbinPath, mode) args = os.Args } cmdIPTables := exec.CommandContext(ctx, binaryPath, args...) cmdIPTables.Stdout = os.Stdout cmdIPTables.Stderr = os.Stderr if err := cmdIPTables.Run(); err != nil { code := 1 var exitErr *exec.ExitError if errors.As(err, &exitErr) { code = exitErr.ExitCode() } else { // If it's not an ExitError, the command probably didn't finish and something // else failed, which means it might not had outputted anything. In that case, // print the error message just in case. fmt.Fprintf(os.Stderr, "Error: %s\n", err) } os.Exit(code) } }
94
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package commands import ( "bytes" "fmt" "os/exec" ) // RunAndReadError runs an exec.Cmd and, if anything was written to stderr, includes that output in the // returned error. This overrides any Stderr already set on cmd. func RunAndReadError(cmd *exec.Cmd) error { var stderr bytes.Buffer cmd.Stderr = &stderr if err := cmd.Run(); err != nil { if stderr.Len() > 0 { err = fmt.Errorf("%s: %v", stderr.String(), err) } return err } return nil }
39
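A short usage sketch for RunAndReadError; the failing command is arbitrary and, because the package is internal, the sketch only compiles from within the iptables-wrappers module.

package main

import (
	"fmt"
	"os/exec"

	"github.com/kubernetes-sigs/iptables-wrappers/internal/commands"
)

func main() {
	// An arbitrary failing command: ls on a path that does not exist writes to stderr,
	// and RunAndReadError folds that stderr text into the returned error.
	cmd := exec.Command("ls", "/definitely/not/a/real/path")
	if err := commands.RunAndReadError(cmd); err != nil {
		fmt.Println(err) // stderr text followed by the exit status
	}
}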
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package files import "os" // ExecutableExists checks whether a file exists and is executable by user, group, or other. func ExecutableExists(path string) bool { stat, err := os.Stat(path) return err == nil && stat.Mode()&0o111 != 0 }
23
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package iptables import ( "context" "fmt" "os" "os/exec" "path/filepath" "github.com/kubernetes-sigs/iptables-wrappers/internal/commands" "github.com/kubernetes-sigs/iptables-wrappers/internal/files" ) // AlternativeSelector allows to configure a system to use iptables in // nft or legacy mode. type AlternativeSelector interface { // UseMode configures the system to use the selected iptables mode. UseMode(ctx context.Context, mode Mode) error } // BuildAlternativeSelector builds the proper iptablesAlternativeSelector depending // on the machine's setup. It will use either `alternatives` or `update-alternatives` if present // in the sbin folder. If none is present, it will manage iptables binaries by manually // creating symlinks. func BuildAlternativeSelector(sbinPath string) AlternativeSelector { if files.ExecutableExists(filepath.Join(sbinPath, "alternatives")) { return alternativesSelector{sbinPath: sbinPath} } else if files.ExecutableExists(filepath.Join(sbinPath, "update-alternatives")) { return updateAlternativesSelector{sbinPath: sbinPath} } else { // if we don't find any tool to managed the alternatives, handle it manually with symlinks return symlinkSelector{sbinPath: sbinPath} } } // updateAlternativesSelector manages an iptables setup by using the `update-alternatives` binary. // This is most common for debian based OSs. type updateAlternativesSelector struct { sbinPath string } func (u updateAlternativesSelector) UseMode(ctx context.Context, mode Mode) error { modeStr := string(mode) if err := commands.RunAndReadError(exec.CommandContext(ctx, "update-alternatives", "--set", "iptables", filepath.Join(u.sbinPath, "iptables-"+modeStr))); err != nil { return fmt.Errorf("update-alternatives iptables to mode %s: %v", modeStr, err) } if err := commands.RunAndReadError(exec.CommandContext(ctx, "update-alternatives", "--set", "ip6tables", filepath.Join(u.sbinPath, "ip6tables-"+modeStr))); err != nil { return fmt.Errorf("update-alternatives ip6tables to mode %s: %v", modeStr, err) } return nil } // alternativesSelector manages an iptables setup by using the `alternatives` binary. // This is most common for fedora based OSs. type alternativesSelector struct { sbinPath string } func (a alternativesSelector) UseMode(ctx context.Context, mode Mode) error { if err := commands.RunAndReadError(exec.CommandContext(ctx, "alternatives", "--set", "iptables", filepath.Join(a.sbinPath, "iptables-"+string(mode)))); err != nil { return fmt.Errorf("alternatives to update iptables to mode %s: %v", string(mode), err) } return nil } // symlinkSelector manages an iptables setup by manually creating symlinks // that point to the proper "mode" binaries. // It configures: `iptables`, `iptables-save`, `iptables-restore`, // `ip6tables`, `ip6tables-save` and `ip6tables-restore`. 
type symlinkSelector struct { sbinPath string } func (s symlinkSelector) UseMode(ctx context.Context, mode Mode) error { modeStr := string(mode) xtablesForModePath := XtablesPath(s.sbinPath, mode) cmds := []string{"iptables", "iptables-save", "iptables-restore", "ip6tables", "ip6tables-save", "ip6tables-restore"} for _, cmd := range cmds { cmdPath := filepath.Join(s.sbinPath, cmd) // If deleting fails, ignore it and try to create symlink regardless _ = os.RemoveAll(cmdPath) if err := os.Symlink(xtablesForModePath, cmdPath); err != nil { return fmt.Errorf("creating %s symlink for mode %s: %v", cmd, modeStr, err) } } return nil }
107
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package iptables import ( "bytes" "context" "errors" "github.com/kubernetes-sigs/iptables-wrappers/internal/files" ) // DetectBinaryDir tries to detect the `iptables` location in // either /usr/sbin or /sbin. If it's not there, it returns an error. func DetectBinaryDir() (string, error) { if files.ExecutableExists("/usr/sbin/iptables") { return "/usr/sbin", nil } else if files.ExecutableExists("/sbin/iptables") { return "/sbin", nil } else { return "", errors.New("iptables is not present in either /usr/sbin or /sbin") } } // Mode represents the two different modes iptables can be // configured to: nft or legacy. In string form it can be used // to complete all `iptables-*` commands. type Mode string const ( Legacy Mode = "legacy" NFT Mode = "nft" ) // DetectMode inspects the current iptables entries and tries to // guess which iptables mode is being used: legacy or nft. func DetectMode(ctx context.Context, iptables Installation) Mode { // This method ignores all errors; this is on purpose. We execute all commands // and try to detect patterns on a best-effort basis. If something fails, // continue with the next step. Worst case scenario, if everything fails, // default to nft. // In kubernetes 1.17 and later, kubelet will have created at least // one chain in the "mangle" table (either "KUBE-IPTABLES-HINT" or // "KUBE-KUBELET-CANARY"), so check that first, against // iptables-nft, because we can check that more efficiently and // it's more common these days. rulesOutput := &bytes.Buffer{} _ = iptables.NFTSave(ctx, rulesOutput, "-t", "mangle") if hasKubeletChains(rulesOutput.Bytes()) { return NFT } rulesOutput.Reset() _ = iptables.NFTSaveIP6(ctx, rulesOutput, "-t", "mangle") if hasKubeletChains(rulesOutput.Bytes()) { return NFT } rulesOutput.Reset() // Check for kubernetes 1.17-or-later with iptables-legacy. We // can't pass "-t mangle" to iptables-legacy-save because it would // cause the kernel to create that table if it didn't already // exist, which we don't want. So we have to grab all the rules. _ = iptables.LegacySave(ctx, rulesOutput) if hasKubeletChains(rulesOutput.Bytes()) { return Legacy } rulesOutput.Reset() _ = iptables.LegacySaveIP6(ctx, rulesOutput) if hasKubeletChains(rulesOutput.Bytes()) { return Legacy } // If we can't detect any of the 2 patterns, default to nft. return NFT }
88
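A sketch of how the detection helpers above fit together: find the sbin directory, build an Installation backed by the xtables-*-multi binaries (NewXtablesMultiInstallation is defined elsewhere in this package), and ask DetectMode for its best guess. Because this is an internal package, the import only works from inside the iptables-wrappers module, so treat this as illustrative rather than a drop-in program.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/kubernetes-sigs/iptables-wrappers/internal/iptables"
)

func main() {
	// Find /usr/sbin or /sbin, whichever actually contains iptables.
	sbinPath, err := iptables.DetectBinaryDir()
	if err != nil {
		log.Fatal(err)
	}

	// Use the xtables-*-multi binaries in that directory to dump the current
	// rules and guess whether the node is using nft or legacy tables.
	installation := iptables.NewXtablesMultiInstallation(sbinPath)
	mode := iptables.DetectMode(context.Background(), installation)
	fmt.Printf("detected iptables mode: %s\n", mode)
}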
eks-distro-build-tooling
aws
Go
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package iptables

import "regexp"

var (
	kubeletChainsRegex = regexp.MustCompile(`(?m)^:(KUBE-IPTABLES-HINT|KUBE-KUBELET-CANARY)`)
	ruleEntryRegex     = regexp.MustCompile(`(?m)^-`)
)

// hasKubeletChains checks if the output of an iptables*-save command
// contains any of the chains created by kubelet.
func hasKubeletChains(output []byte) bool {
	return kubeletChainsRegex.Match(output)
}

// ruleEntriesNum counts how many rules there are in an iptables*-save command
// output.
func ruleEntriesNum(iptablesOutput []byte) int {
	return len(ruleEntryRegex.FindAllIndex(iptablesOutput, -1))
}
34
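hasKubeletChains only cares about chain declarations (the lines starting with ':') in *-save output. Below is a self-contained demonstration of the same regex against hand-written sample output; the sample rules are made up for illustration.

package main

import (
	"fmt"
	"regexp"
)

var kubeletChainsRegex = regexp.MustCompile(`(?m)^:(KUBE-IPTABLES-HINT|KUBE-KUBELET-CANARY)`)

func main() {
	withHint := []byte("*mangle\n:PREROUTING ACCEPT [0:0]\n:KUBE-IPTABLES-HINT - [0:0]\nCOMMIT\n")
	withoutHint := []byte("*filter\n:INPUT ACCEPT [0:0]\n-A INPUT -j ACCEPT\nCOMMIT\n")

	fmt.Println(kubeletChainsRegex.Match(withHint))    // true: kubelet hint chain present
	fmt.Println(kubeletChainsRegex.Match(withoutHint)) // false: no kubelet chains
}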
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package iptables import ( "bytes" "context" "os/exec" "path/filepath" "github.com/kubernetes-sigs/iptables-wrappers/internal/commands" ) const ( xtablesNFTMultiBinaryName = "xtables-nft-multi" xtablesLegacyMultiBinaryName = "xtables-legacy-multi" ) // Installation represents the set of iptables-*-save binaries installed in a machine. // It is expected the machine supports both nft and legacy modes. This can be implemented by // calling directly iptables-*-save, xtables, etc. The implementation should accept the same // command arguments as the mentioned binaries. type Installation interface { // LegacySave runs a iptables-legacy-save command LegacySave(ctx context.Context, out *bytes.Buffer, args ...string) error // LegacySaveIP6 runs a ip6tables-legacy-save command LegacySaveIP6(ctx context.Context, out *bytes.Buffer, args ...string) error // NFTSave runs a iptables-nft-save command NFTSave(ctx context.Context, out *bytes.Buffer, args ...string) error // NFTSaveIP6 runs a ip6tables-nft-save command NFTSaveIP6(ctx context.Context, out *bytes.Buffer, args ...string) error } func NewXtablesMultiInstallation(sbinPath string) XtablesMulti { return XtablesMulti{ nftBinary: filepath.Join(sbinPath, xtablesNFTMultiBinaryName), legacyBinary: filepath.Join(sbinPath, xtablesLegacyMultiBinaryName), } } // XtablesMulti allows to run iptables commands using xtables-*-multi. // It implements iptablesInstallation. type XtablesMulti struct { nftBinary string legacyBinary string } func (x XtablesMulti) LegacySave(ctx context.Context, out *bytes.Buffer, args ...string) error { return x.exec(ctx, out, x.legacyBinary, "iptables-save", args...) } func (x XtablesMulti) LegacySaveIP6(ctx context.Context, out *bytes.Buffer, args ...string) error { return x.exec(ctx, out, x.legacyBinary, "ip6tables-save", args...) } func (x XtablesMulti) NFTSave(ctx context.Context, out *bytes.Buffer, args ...string) error { return x.exec(ctx, out, x.nftBinary, "iptables-save", args...) } func (x XtablesMulti) NFTSaveIP6(ctx context.Context, out *bytes.Buffer, args ...string) error { return x.exec(ctx, out, x.nftBinary, "ip6tables-save", args...) } func (x XtablesMulti) exec(ctx context.Context, out *bytes.Buffer, multiBinary, command string, args ...string) error { allArgs := make([]string, 0, len(args)+1) allArgs = append(allArgs, command) allArgs = append(allArgs, args...) c := exec.CommandContext(ctx, multiBinary, allArgs...) c.Stdout = out return commands.RunAndReadError(c) } // XtablesPath returns the path to the `xtable-<mode>-multi binary func XtablesPath(sbinPath string, mode Mode) string { return filepath.Join(sbinPath, "xtables-"+string(mode)+"-multi") }
90
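The xtables-*-multi binaries are multi-call binaries: the first argument names the applet to run, which is exactly how XtablesMulti.exec invokes them. Here is a sketch of that calling convention using exec directly; the /usr/sbin location is an assumption, and reading the ruleset normally requires root.

package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
)

func main() {
	var out bytes.Buffer
	// Equivalent to running `iptables-nft-save -t mangle`.
	cmd := exec.CommandContext(context.Background(), "/usr/sbin/xtables-nft-multi",
		"iptables-save", "-t", "mangle")
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		fmt.Println("running xtables-nft-multi:", err)
		return
	}
	fmt.Print(out.String())
}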
eks-distro-build-tooling
aws
Go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "bytes" "context" "os/exec" "path/filepath" "regexp" "testing" "github.com/kubernetes-sigs/iptables-wrappers/internal/commands" "github.com/kubernetes-sigs/iptables-wrappers/internal/iptables" ) // iptablesVersion denotes the IP version for a iptables command, V4 or V6 type iptablesIPVersion string const ( v4 iptablesIPVersion = "iptables" v6 iptablesIPVersion = "ip6tables" ) // iptablesMode represents a iptables mode. type iptablesMode struct { original, wrongMode iptables.Mode // expectedIPTablesVStr is the subtring expected in betwen brakets when // running `iptables -V` for this particular mode // ex. for nft -> `iptables v1.8.7 (nf_tables)` expectedIPTablesVStr string } var legacy = iptablesMode{ original: iptables.Legacy, wrongMode: iptables.NFT, expectedIPTablesVStr: "legacy", } var nft = iptablesMode{ original: iptables.NFT, wrongMode: iptables.Legacy, expectedIPTablesVStr: "nf_tables", } func TestIPTablesWrapperLegacy(t *testing.T) { tt := newIPTablesWrapperTest(t, v4, legacy) runTest(t, tt) } func TestIPTablesWrapperNFT(t *testing.T) { tt := newIPTablesWrapperTest(t, v4, nft) runTest(t, tt) } func TestIP6TablesWrapperLegacy(t *testing.T) { tt := newIPTablesWrapperTest(t, v6, legacy) runTest(t, tt) } func TestIP6TablesWrapperNFT(t *testing.T) { tt := newIPTablesWrapperTest(t, v6, nft) runTest(t, tt) } func runTest(tb testing.TB, test iptablesWrapperTest) { ctx := context.Background() test.assertIPTablesUndecided(tb) tb.Log("Inserting chains") // Initialize the chosen iptables mode with just a hint chain test.iptables.runAndAssertSuccess(ctx, tb, "-t", "mangle", "-N", "KUBE-IPTABLES-HINT") // Put some junk in the other iptables system test.wrongModeIPTables.runAndAssertSuccess(ctx, tb, "-t", "filter", "-N", "BAD-1") test.wrongModeIPTables.runAndAssertSuccess(ctx, tb, "-t", "filter", "-A", "BAD-1", "-j", "ACCEPT") test.wrongModeIPTables.runAndAssertSuccess(ctx, tb, "-t", "filter", "-N", "BAD-2") test.wrongModeIPTables.runAndAssertSuccess(ctx, tb, "-t", "filter", "-A", "BAD-2", "-j", "DROP") test.assertIPTablesUndecided(tb) // This should run the iptables-wrapper tb.Log("Running `iptables -L` command") c := exec.CommandContext(ctx, "iptables", "-L") assertSuccess(tb, commands.RunAndReadError(c)) test.assertIPTablesResolved(ctx, tb) } type iptablesWrapperTest struct { mode iptablesMode iptables, wrongModeIPTables ipTablesRunner sbinPath string wrapperPath string iptablesPath, ip6tablesPath string } // newIPTablesWrapperTest creates a new test setup for a particular IP version of iptables (iptables or ip6tables) // and a particular mode (legacy or nft) func newIPTablesWrapperTest(tb testing.TB, ipV iptablesIPVersion, mode iptablesMode) iptablesWrapperTest { sbinPath, err := iptables.DetectBinaryDir() assertSuccess(tb, err) return iptablesWrapperTest{ mode: mode, iptables: newIPTablesRunner(ipV, mode.original), wrongModeIPTables: newIPTablesRunner(ipV, mode.wrongMode), sbinPath: sbinPath, wrapperPath: 
filepath.Join(sbinPath, "iptables-wrapper"),
		iptablesPath:      filepath.Join(sbinPath, "iptables"),
		ip6tablesPath:     filepath.Join(sbinPath, "ip6tables"),
	}
}

func (tt iptablesWrapperTest) assertIPTablesUndecided(tb testing.TB) {
	tb.Log("Checking the iptables mode hasn't been decided yet")

	iptablesRealPath := tt.iptablesRealPath(tb)
	if !tt.isIPTablesWrapper(iptablesRealPath) {
		tb.Fatalf("iptables link was resolved prematurely, got [%s]", iptablesRealPath)
	}
	tb.Logf("iptables points to %s", iptablesRealPath)

	ip6tablesRealPath := tt.ip6tablesRealPath(tb)
	if !tt.isIPTablesWrapper(ip6tablesRealPath) {
		tb.Fatalf("ip6tables link was resolved prematurely, got [%s]", ip6tablesRealPath)
	}
	tb.Logf("ip6tables points to %s", ip6tablesRealPath)
}

func (tt iptablesWrapperTest) assertIPTablesResolved(ctx context.Context, tb testing.TB) {
	tb.Logf("Checking the iptables mode has been resolved to %s", tt.mode.original)

	iptablesRealPath := tt.iptablesRealPath(tb)
	if tt.isIPTablesWrapper(iptablesRealPath) {
		tb.Fatal("iptables link is not yet resolved")
	}

	ip6tablesRealPath := tt.ip6tablesRealPath(tb)
	if tt.isIPTablesWrapper(ip6tablesRealPath) {
		tb.Fatal("ip6tables link is not yet resolved")
	}

	mode := readIPTablesMode(ctx, tb, "iptables")
	if mode != tt.mode.expectedIPTablesVStr {
		tb.Fatalf("iptables link resolved incorrectly: expected %s, got %s", tt.mode.expectedIPTablesVStr, mode)
	}

	mode = readIPTablesMode(ctx, tb, "ip6tables")
	if mode != tt.mode.expectedIPTablesVStr {
		tb.Fatalf("ip6tables link resolved incorrectly: expected %s, got %s", tt.mode.expectedIPTablesVStr, mode)
	}
}

func (tt iptablesWrapperTest) isIPTablesWrapper(binaryRealPath string) bool {
	return binaryRealPath == tt.wrapperPath
}

func (tt iptablesWrapperTest) iptablesRealPath(tb testing.TB) string {
	return binaryRealPath(tb, tt.iptablesPath)
}

func (tt iptablesWrapperTest) ip6tablesRealPath(tb testing.TB) string {
	return binaryRealPath(tb, tt.ip6tablesPath)
}

func binaryRealPath(tb testing.TB, binary string) string {
	realPath, err := filepath.EvalSymlinks(binary)
	assertSuccess(tb, err)
	return realPath
}

func newIPTablesRunner(ipV iptablesIPVersion, mode iptables.Mode) ipTablesRunner {
	return ipTablesRunner{
		binary: string(ipV) + "-" + string(mode),
	}
}

type ipTablesRunner struct {
	binary string
}

func (r ipTablesRunner) runAndAssertSuccess(ctx context.Context, tb testing.TB, args ...string) {
	tb.Helper()
	assertSuccess(tb, r.run(ctx, args...))
}

func (r ipTablesRunner) run(ctx context.Context, args ...string) error {
	c := exec.CommandContext(ctx, r.binary, args...)
	return commands.RunAndReadError(c)
}

var iptablesModeRegex = regexp.MustCompile(`^ip6?tables.*\((.+)\).*`)

func readIPTablesMode(ctx context.Context, tb testing.TB, iptables string) string {
	tb.Helper()

	var out bytes.Buffer
	c := exec.CommandContext(ctx, iptables, "-V")
	c.Stdout = &out
	assertSuccess(tb, commands.RunAndReadError(c))

	outIPTablesVersion := out.String()

	matches := iptablesModeRegex.FindStringSubmatch(outIPTablesVersion)
	if len(matches) != 2 {
		tb.Fatalf("Can't read `%s -V` output format: %s", iptables, outIPTablesVersion)
	}
	tb.Logf("Output of `%s -V`: %s", iptables, outIPTablesVersion)

	mode := matches[1]
	return mode
}

func assertSuccess(tb testing.TB, err error) {
	tb.Helper()
	if err != nil {
		tb.Fatal(err.Error())
	}
}
230
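The readIPTablesMode helper in the test above decides which backend answered by parsing the parenthesised part of the `iptables -V` banner, e.g. `iptables v1.8.7 (nf_tables)`. Below is a minimal sketch of that parsing step run against hard-coded banners instead of a real binary.

package main

import (
	"fmt"
	"regexp"
)

var iptablesModeRegex = regexp.MustCompile(`^ip6?tables.*\((.+)\).*`)

func main() {
	for _, banner := range []string{
		"iptables v1.8.7 (nf_tables)",
		"ip6tables v1.8.7 (legacy)",
	} {
		matches := iptablesModeRegex.FindStringSubmatch(banner)
		if len(matches) != 2 {
			fmt.Println("unrecognized banner:", banner)
			continue
		}
		fmt.Println(matches[1]) // prints "nf_tables", then "legacy"
	}
}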
eks-distro-build-tooling
aws
Go
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"os/user"
	"time"
)

func main() {
	pool, err := x509.SystemCertPool()
	if err != nil {
		fmt.Printf("Error %s loading system certs.\n", err)
		panic(err)
	}
	if pool == nil {
		fmt.Println("No cert pools.")
		os.Exit(1)
	}
	fmt.Println("Certs Loaded!")

	resp, err := http.Get("https://google.com")
	if err != nil {
		fmt.Printf("Error %s loading google.\n", err)
		panic(err)
	}
	defer resp.Body.Close()
	_, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Printf("Error %s reading google response.\n", err)
		panic(err)
	}

	// manually set time zone
	if tz := os.Getenv("TZ"); tz != "" {
		var err error
		time.Local, err = time.LoadLocation(tz)
		if err != nil {
			log.Printf("error loading location '%s': %v\n", tz, err)
			panic(err)
		}
	}

	user, err := user.Current()
	if err != nil {
		panic(err)
	}

	// Current User
	if user.Name != "nobody" {
		panic("user name unexpected!")
	}
	if user.Uid != "65534" {
		panic("user uid unexpected!")
	}
	if user.HomeDir != "/nonexistent" {
		panic("user home unexpected!")
	}

	f, err := ioutil.TempFile("", "sample")
	if err != nil {
		panic(err)
	}
	fmt.Println("Temp file name:", f.Name())
}
84
eks-distro-build-tooling
aws
Go
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main // #include <stdio.h> // #include <stdlib.h> import "C" import "unsafe" func Print(s string) { cs := C.CString(s) C.fputs(cs, (*C.FILE)(C.stdout)) C.fflush(C.stdout) C.free(unsafe.Pointer(cs)) } func main() { Print("Printed from unsafe C code\n") }
32
eks-distro-build-tooling
aws
Go
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Modeled from https://gist.github.com/zchee/444c8c20aa7756468d8e
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	git "github.com/libgit2/git2go/v33"
)

func credentialsMemoryCallback(url string, username string, allowedTypes git.CredType) (*git.Credential, error) {
	pub, err := ioutil.ReadFile("/root/.ssh/id_rsa.pub")
	if err != nil {
		log.Fatalf("unable to read file: %v", err)
	}
	pri, err := ioutil.ReadFile("/root/.ssh/id_rsa")
	if err != nil {
		log.Fatalf("unable to read file: %v", err)
	}
	cred, err := git.NewCredentialSSHKeyFromMemory("git", string(pub), string(pri), "")
	if err != nil {
		return nil, err
	}
	return cred, nil
}

func credentialsFileCallback(url string, username string, allowedTypes git.CredType) (*git.Credential, error) {
	cred, err := git.NewCredSshKey("git", "/root/.ssh/id_rsa.pub", "/root/.ssh/id_rsa", "")
	if err != nil {
		return nil, err
	}
	return cred, nil
}

// Accepts any certificate; this check was made a no-op during troubleshooting.
func certificateCheckCallback(cert *git.Certificate, valid bool, hostname string) error {
	return nil
}

func main() {
	cloneOptions := &git.CloneOptions{
		FetchOptions: git.FetchOptions{
			RemoteCallbacks: git.RemoteCallbacks{
				CredentialsCallback:      credentialsMemoryCallback,
				CertificateCheckCallback: certificateCheckCallback,
			},
		},
		CheckoutBranch: "main",
	}
	_, err := git.Clone(os.Getenv("PRIVATE_REPO"), "private-repo-memory", cloneOptions)
	if err != nil {
		log.Panic(err)
	}
	if _, err := os.Stat("private-repo-memory"); os.IsNotExist(err) {
		log.Panic("repo did not clone!")
	}

	cloneOptions = &git.CloneOptions{
		FetchOptions: git.FetchOptions{
			RemoteCallbacks: git.RemoteCallbacks{
				CredentialsCallback:      credentialsFileCallback,
				CertificateCheckCallback: certificateCheckCallback,
			},
		},
		CheckoutBranch: "main",
	}
	_, err = git.Clone(os.Getenv("PRIVATE_REPO"), "private-repo-file", cloneOptions)
	if err != nil {
		log.Panic(err)
	}
	if _, err := os.Stat("private-repo-file"); os.IsNotExist(err) {
		log.Panic("repo did not clone!")
	}

	cloneOptions = &git.CloneOptions{
		CheckoutBranch: "main",
	}
	_, err = git.Clone("https://github.com/aws/eks-distro.git", "public-repo", cloneOptions)
	if err != nil {
		log.Panic(err)
	}
	if _, err := os.Stat("public-repo"); os.IsNotExist(err) {
		log.Panic("repo did not clone!")
	}

	fmt.Println("Successfully cloned!")
}
106
eks-distro-build-tooling
aws
Go
package dhcp4client import ( "bytes" "fmt" "hash/fnv" "math/rand" "net" "sync" "syscall" "time" "github.com/d2g/dhcp4" ) const ( MaxDHCPLen = 576 ) type Client struct { hardwareAddr net.HardwareAddr //The HardwareAddr to send in the request. ignoreServers []net.IP //List of Servers to Ignore requests from. timeout time.Duration //Time before we timeout. broadcast bool //Set the Bcast flag in BOOTP Flags connection ConnectionInt //The Connection Method to use generateXID func([]byte) //Function Used to Generate a XID } //Abstracts the type of underlying socket used type ConnectionInt interface { Close() error Write(packet []byte) error ReadFrom() ([]byte, net.IP, error) SetReadTimeout(t time.Duration) error } func New(options ...func(*Client) error) (*Client, error) { c := Client{ timeout: time.Second * 10, broadcast: true, } err := c.SetOption(options...) if err != nil { return nil, err } if c.generateXID == nil { // https://tools.ietf.org/html/rfc2131#section-4.1 explains: // // A DHCP client MUST choose 'xid's in such a way as to minimize the chance // of using an 'xid' identical to one used by another client. // // Hence, seed a random number generator with the current time and hardware // address. h := fnv.New64() h.Write(c.hardwareAddr) seed := int64(h.Sum64()) + time.Now().Unix() rnd := rand.New(rand.NewSource(seed)) var rndMu sync.Mutex c.generateXID = func(b []byte) { rndMu.Lock() defer rndMu.Unlock() rnd.Read(b) } } //if connection hasn't been set as an option create the default. if c.connection == nil { conn, err := NewInetSock() if err != nil { return nil, err } c.connection = conn } return &c, nil } func (c *Client) SetOption(options ...func(*Client) error) error { for _, opt := range options { if err := opt(c); err != nil { return err } } return nil } func Timeout(t time.Duration) func(*Client) error { return func(c *Client) error { c.timeout = t return nil } } func IgnoreServers(s []net.IP) func(*Client) error { return func(c *Client) error { c.ignoreServers = s return nil } } func HardwareAddr(h net.HardwareAddr) func(*Client) error { return func(c *Client) error { c.hardwareAddr = h return nil } } func Broadcast(b bool) func(*Client) error { return func(c *Client) error { c.broadcast = b return nil } } func Connection(conn ConnectionInt) func(*Client) error { return func(c *Client) error { c.connection = conn return nil } } func GenerateXID(g func([]byte)) func(*Client) error { return func(c *Client) error { c.generateXID = g return nil } } //Close Connections func (c *Client) Close() error { if c.connection != nil { return c.connection.Close() } return nil } //Send the Discovery Packet to the Broadcast Channel func (c *Client) SendDiscoverPacket() (dhcp4.Packet, error) { discoveryPacket := c.DiscoverPacket() discoveryPacket.PadToMinSize() return discoveryPacket, c.SendPacket(discoveryPacket) } // TimeoutError records a timeout when waiting for a DHCP packet. type TimeoutError struct { Timeout time.Duration } func (te *TimeoutError) Error() string { return fmt.Sprintf("no DHCP packet received within %v", te.Timeout) } //Retreive Offer... //Wait for the offer for a specific Discovery Packet. 
func (c *Client) GetOffer(discoverPacket *dhcp4.Packet) (dhcp4.Packet, error) { start := time.Now() for { timeout := c.timeout - time.Since(start) if timeout < 0 { return dhcp4.Packet{}, &TimeoutError{Timeout: c.timeout} } c.connection.SetReadTimeout(timeout) readBuffer, source, err := c.connection.ReadFrom() if err != nil { if errno, ok := err.(syscall.Errno); ok && errno == syscall.EAGAIN { return dhcp4.Packet{}, &TimeoutError{Timeout: c.timeout} } return dhcp4.Packet{}, err } offerPacket := dhcp4.Packet(readBuffer) offerPacketOptions := offerPacket.ParseOptions() // Ignore Servers in my Ignore list for _, ignoreServer := range c.ignoreServers { if source.Equal(ignoreServer) { continue } if offerPacket.SIAddr().Equal(ignoreServer) { continue } } if len(offerPacketOptions[dhcp4.OptionDHCPMessageType]) < 1 || dhcp4.MessageType(offerPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.Offer || !bytes.Equal(discoverPacket.XId(), offerPacket.XId()) { continue } return offerPacket, nil } } //Send Request Based On the offer Received. func (c *Client) SendRequest(offerPacket *dhcp4.Packet) (dhcp4.Packet, error) { requestPacket := c.RequestPacket(offerPacket) requestPacket.PadToMinSize() return requestPacket, c.SendPacket(requestPacket) } //Retreive Acknowledgement //Wait for the offer for a specific Request Packet. func (c *Client) GetAcknowledgement(requestPacket *dhcp4.Packet) (dhcp4.Packet, error) { start := time.Now() for { timeout := c.timeout - time.Since(start) if timeout < 0 { return dhcp4.Packet{}, &TimeoutError{Timeout: c.timeout} } c.connection.SetReadTimeout(timeout) readBuffer, source, err := c.connection.ReadFrom() if err != nil { if errno, ok := err.(syscall.Errno); ok && errno == syscall.EAGAIN { return dhcp4.Packet{}, &TimeoutError{Timeout: c.timeout} } return dhcp4.Packet{}, err } acknowledgementPacket := dhcp4.Packet(readBuffer) acknowledgementPacketOptions := acknowledgementPacket.ParseOptions() // Ignore Servers in my Ignore list for _, ignoreServer := range c.ignoreServers { if source.Equal(ignoreServer) { continue } if acknowledgementPacket.SIAddr().Equal(ignoreServer) { continue } } if !bytes.Equal(requestPacket.XId(), acknowledgementPacket.XId()) || len(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType]) < 1 || (dhcp4.MessageType(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK && dhcp4.MessageType(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.NAK) { continue } return acknowledgementPacket, nil } } //Send Decline to the received acknowledgement. func (c *Client) SendDecline(acknowledgementPacket *dhcp4.Packet) (dhcp4.Packet, error) { declinePacket := c.DeclinePacket(acknowledgementPacket) declinePacket.PadToMinSize() return declinePacket, c.SendPacket(declinePacket) } //Send a DHCP Packet. 
func (c *Client) SendPacket(packet dhcp4.Packet) error { return c.connection.Write(packet) } //Create Discover Packet func (c *Client) DiscoverPacket() dhcp4.Packet { messageid := make([]byte, 4) c.generateXID(messageid) packet := dhcp4.NewPacket(dhcp4.BootRequest) packet.SetCHAddr(c.hardwareAddr) packet.SetXId(messageid) packet.SetBroadcast(c.broadcast) packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Discover)}) //packet.PadToMinSize() return packet } //Create Request Packet func (c *Client) RequestPacket(offerPacket *dhcp4.Packet) dhcp4.Packet { offerOptions := offerPacket.ParseOptions() packet := dhcp4.NewPacket(dhcp4.BootRequest) packet.SetCHAddr(c.hardwareAddr) packet.SetXId(offerPacket.XId()) packet.SetCIAddr(offerPacket.CIAddr()) packet.SetSIAddr(offerPacket.SIAddr()) packet.SetBroadcast(c.broadcast) packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Request)}) packet.AddOption(dhcp4.OptionRequestedIPAddress, (offerPacket.YIAddr()).To4()) packet.AddOption(dhcp4.OptionServerIdentifier, offerOptions[dhcp4.OptionServerIdentifier]) return packet } //Create Request Packet For a Renew func (c *Client) RenewalRequestPacket(acknowledgement *dhcp4.Packet) dhcp4.Packet { messageid := make([]byte, 4) c.generateXID(messageid) acknowledgementOptions := acknowledgement.ParseOptions() packet := dhcp4.NewPacket(dhcp4.BootRequest) packet.SetCHAddr(acknowledgement.CHAddr()) packet.SetXId(messageid) packet.SetCIAddr(acknowledgement.YIAddr()) packet.SetSIAddr(acknowledgement.SIAddr()) packet.SetBroadcast(c.broadcast) packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Request)}) packet.AddOption(dhcp4.OptionRequestedIPAddress, (acknowledgement.YIAddr()).To4()) packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier]) return packet } //Create Release Packet For a Release func (c *Client) ReleasePacket(acknowledgement *dhcp4.Packet) dhcp4.Packet { messageid := make([]byte, 4) c.generateXID(messageid) acknowledgementOptions := acknowledgement.ParseOptions() packet := dhcp4.NewPacket(dhcp4.BootRequest) packet.SetCHAddr(acknowledgement.CHAddr()) packet.SetXId(messageid) packet.SetCIAddr(acknowledgement.YIAddr()) packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Release)}) packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier]) return packet } //Create Decline Packet func (c *Client) DeclinePacket(acknowledgement *dhcp4.Packet) dhcp4.Packet { messageid := make([]byte, 4) c.generateXID(messageid) acknowledgementOptions := acknowledgement.ParseOptions() packet := dhcp4.NewPacket(dhcp4.BootRequest) packet.SetCHAddr(acknowledgement.CHAddr()) packet.SetXId(messageid) packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Decline)}) packet.AddOption(dhcp4.OptionRequestedIPAddress, (acknowledgement.YIAddr()).To4()) packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier]) return packet } //Lets do a Full DHCP Request. 
func (c *Client) Request() (bool, dhcp4.Packet, error) {
	discoveryPacket, err := c.SendDiscoverPacket()
	if err != nil {
		return false, discoveryPacket, err
	}

	offerPacket, err := c.GetOffer(&discoveryPacket)
	if err != nil {
		return false, offerPacket, err
	}

	requestPacket, err := c.SendRequest(&offerPacket)
	if err != nil {
		return false, requestPacket, err
	}

	acknowledgement, err := c.GetAcknowledgement(&requestPacket)
	if err != nil {
		return false, acknowledgement, err
	}

	acknowledgementOptions := acknowledgement.ParseOptions()
	if dhcp4.MessageType(acknowledgementOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK {
		return false, acknowledgement, nil
	}

	return true, acknowledgement, nil
}

//Renew a lease based on the Acknowledgement Packet.
//Returns Successful, The Acknowledgement Packet, Any Errors
func (c *Client) Renew(acknowledgement dhcp4.Packet) (bool, dhcp4.Packet, error) {
	renewRequest := c.RenewalRequestPacket(&acknowledgement)
	renewRequest.PadToMinSize()

	err := c.SendPacket(renewRequest)
	if err != nil {
		return false, renewRequest, err
	}

	newAcknowledgement, err := c.GetAcknowledgement(&renewRequest)
	if err != nil {
		return false, newAcknowledgement, err
	}

	newAcknowledgementOptions := newAcknowledgement.ParseOptions()
	if dhcp4.MessageType(newAcknowledgementOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK {
		return false, newAcknowledgement, nil
	}

	return true, newAcknowledgement, nil
}

//Release a lease based on the Acknowledgement Packet.
//Returns Any Errors
func (c *Client) Release(acknowledgement dhcp4.Packet) error {
	release := c.ReleasePacket(&acknowledgement)
	release.PadToMinSize()

	return c.SendPacket(release)
}
417
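A sketch of a complete DHCP exchange with the client above. The interface name eth0 is an assumption, and binding the default UDP socket on port 68 normally requires elevated privileges.

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/d2g/dhcp4client"
)

func main() {
	iface, err := net.InterfaceByName("eth0") // assumed interface name
	if err != nil {
		log.Fatal(err)
	}

	// Default inet socket transport; the hardware address is carried in the
	// DISCOVER/REQUEST packets and also seeds XID generation.
	client, err := dhcp4client.New(dhcp4client.HardwareAddr(iface.HardwareAddr))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ok, ack, err := client.Request()
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		log.Fatal("DHCP server answered with NAK")
	}
	fmt.Println("leased address:", ack.YIAddr())
}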
eks-distro-build-tooling
aws
Go
package dhcp4client import ( cryptorand "crypto/rand" mathrand "math/rand" ) func CryptoGenerateXID(b []byte) { if _, err := cryptorand.Read(b); err != nil { panic(err) } } func MathGenerateXID(b []byte) { if _, err := mathrand.Read(b); err != nil { panic(err) } }
19
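The client defaults to a time- and MAC-seeded math/rand XID generator; the helpers above let callers opt into crypto/rand instead. A small sketch of wiring that in through the GenerateXID option; the MAC address is a made-up example, and creating the default socket still needs the usual privileges.

package main

import (
	"log"
	"net"

	"github.com/d2g/dhcp4client"
)

func main() {
	mac, err := net.ParseMAC("02:00:00:00:00:01") // example locally administered MAC
	if err != nil {
		log.Fatal(err)
	}

	client, err := dhcp4client.New(
		dhcp4client.HardwareAddr(mac),
		dhcp4client.GenerateXID(dhcp4client.CryptoGenerateXID),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}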
eks-distro-build-tooling
aws
Go
package dhcp4client import ( "net" "time" ) type inetSock struct { *net.UDPConn laddr net.UDPAddr raddr net.UDPAddr } func NewInetSock(options ...func(*inetSock) error) (*inetSock, error) { c := &inetSock{ laddr: net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 68}, raddr: net.UDPAddr{IP: net.IPv4bcast, Port: 67}, } err := c.setOption(options...) if err != nil { return nil, err } conn, err := net.ListenUDP("udp4", &c.laddr) if err != nil { return nil, err } c.UDPConn = conn return c, err } func (c *inetSock) setOption(options ...func(*inetSock) error) error { for _, opt := range options { if err := opt(c); err != nil { return err } } return nil } func SetLocalAddr(l net.UDPAddr) func(*inetSock) error { return func(c *inetSock) error { c.laddr = l return nil } } func SetRemoteAddr(r net.UDPAddr) func(*inetSock) error { return func(c *inetSock) error { c.raddr = r return nil } } func (c *inetSock) Write(packet []byte) error { _, err := c.WriteToUDP(packet, &c.raddr) return err } func (c *inetSock) ReadFrom() ([]byte, net.IP, error) { readBuffer := make([]byte, MaxDHCPLen) n, source, err := c.ReadFromUDP(readBuffer) if source != nil { return readBuffer[:n], source.IP, err } else { return readBuffer[:n], net.IP{}, err } } func (c *inetSock) SetReadTimeout(t time.Duration) error { return c.SetReadDeadline(time.Now().Add(t)) }
76
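When the defaults need overriding, the UDP transport above can be built explicitly and handed to the client through the Connection option. The unicast server address below is an assumption for illustration (192.0.2.1 is a documentation range), e.g. for renewing against a known server.

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/d2g/dhcp4client"
)

func main() {
	conn, err := dhcp4client.NewInetSock(
		dhcp4client.SetLocalAddr(net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 68}),
		dhcp4client.SetRemoteAddr(net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 67}), // assumed server address
	)
	if err != nil {
		log.Fatal(err)
	}

	client, err := dhcp4client.New(dhcp4client.Connection(conn))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	fmt.Println("client ready with explicit UDP transport")
}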
eks-distro-build-tooling
aws
Go
package dhcp4client import ( "encoding/binary" "math/rand" "net" "time" "golang.org/x/sys/unix" ) const ( minIPHdrLen = 20 maxIPHdrLen = 60 udpHdrLen = 8 ip4Ver = 0x40 ttl = 16 srcPort = 68 dstPort = 67 ) var ( bcastMAC = []byte{255, 255, 255, 255, 255, 255} ) // abstracts AF_PACKET type packetSock struct { fd int ifindex int } func NewPacketSock(ifindex int) (*packetSock, error) { fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_DGRAM, int(swap16(unix.ETH_P_IP))) if err != nil { return nil, err } addr := unix.SockaddrLinklayer{ Ifindex: ifindex, Protocol: swap16(unix.ETH_P_IP), } if err = unix.Bind(fd, &addr); err != nil { return nil, err } return &packetSock{ fd: fd, ifindex: ifindex, }, nil } func (pc *packetSock) Close() error { return unix.Close(pc.fd) } func (pc *packetSock) Write(packet []byte) error { lladdr := unix.SockaddrLinklayer{ Ifindex: pc.ifindex, Protocol: swap16(unix.ETH_P_IP), Halen: uint8(len(bcastMAC)), } copy(lladdr.Addr[:], bcastMAC) pkt := make([]byte, minIPHdrLen+udpHdrLen+len(packet)) fillIPHdr(pkt[0:minIPHdrLen], udpHdrLen+uint16(len(packet))) fillUDPHdr(pkt[minIPHdrLen:minIPHdrLen+udpHdrLen], uint16(len(packet))) // payload copy(pkt[minIPHdrLen+udpHdrLen:len(pkt)], packet) return unix.Sendto(pc.fd, pkt, 0, &lladdr) } func (pc *packetSock) ReadFrom() ([]byte, net.IP, error) { pkt := make([]byte, maxIPHdrLen+udpHdrLen+MaxDHCPLen) n, _, err := unix.Recvfrom(pc.fd, pkt, 0) if err != nil { return nil, nil, err } // IP hdr len ihl := int(pkt[0]&0x0F) * 4 // Source IP address src := net.IP(pkt[12:16]) return pkt[ihl+udpHdrLen : n], src, nil } func (pc *packetSock) SetReadTimeout(t time.Duration) error { tv := unix.NsecToTimeval(t.Nanoseconds()) return unix.SetsockoptTimeval(pc.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv) } // compute's 1's complement checksum func chksum(p []byte, csum []byte) { cklen := len(p) s := uint32(0) for i := 0; i < (cklen - 1); i += 2 { s += uint32(p[i+1])<<8 | uint32(p[i]) } if cklen&1 == 1 { s += uint32(p[cklen-1]) } s = (s >> 16) + (s & 0xffff) s = s + (s >> 16) s = ^s csum[0] = uint8(s & 0xff) csum[1] = uint8(s >> 8) } func fillIPHdr(hdr []byte, payloadLen uint16) { // version + IHL hdr[0] = ip4Ver | (minIPHdrLen / 4) // total length binary.BigEndian.PutUint16(hdr[2:4], uint16(len(hdr))+payloadLen) // identification if _, err := rand.Read(hdr[4:5]); err != nil { panic(err) } // TTL hdr[8] = 16 // Protocol hdr[9] = unix.IPPROTO_UDP // dst IP copy(hdr[16:20], net.IPv4bcast.To4()) // compute IP hdr checksum chksum(hdr[0:len(hdr)], hdr[10:12]) } func fillUDPHdr(hdr []byte, payloadLen uint16) { // src port binary.BigEndian.PutUint16(hdr[0:2], srcPort) // dest port binary.BigEndian.PutUint16(hdr[2:4], dstPort) // length binary.BigEndian.PutUint16(hdr[4:6], udpHdrLen+payloadLen) } func swap16(x uint16) uint16 { var b [2]byte binary.BigEndian.PutUint16(b[:], x) return binary.LittleEndian.Uint16(b[:]) }
148
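The AF_PACKET transport assembles its own IPv4 and UDP headers, so it carries an Internet (1's complement) checksum routine. This standalone sketch copies the same folding logic and shows the usual self-check: once the checksum field is filled in, summing the completed header again comes out to zero.

package main

import "fmt"

// chksum computes the 1's complement Internet checksum of p and writes the
// two result bytes into csum (same folding algorithm as the packetSock code).
func chksum(p []byte, csum []byte) {
	cklen := len(p)
	s := uint32(0)
	for i := 0; i < (cklen - 1); i += 2 {
		s += uint32(p[i+1])<<8 | uint32(p[i])
	}
	if cklen&1 == 1 {
		s += uint32(p[cklen-1])
	}
	s = (s >> 16) + (s & 0xffff)
	s = s + (s >> 16)
	s = ^s
	csum[0] = uint8(s & 0xff)
	csum[1] = uint8(s >> 8)
}

func main() {
	// A minimal 20-byte IPv4 header: version/IHL, TTL, protocol UDP and a
	// broadcast destination; the remaining fields stay zero for the demo.
	hdr := make([]byte, 20)
	hdr[0] = 0x45 // version 4, header length 5*4 bytes
	hdr[8] = 16   // TTL
	hdr[9] = 17   // IPPROTO_UDP
	copy(hdr[16:20], []byte{255, 255, 255, 255})

	chksum(hdr, hdr[10:12]) // fill the checksum field in place
	fmt.Printf("checksum bytes: 0x%02x 0x%02x\n", hdr[10], hdr[11])

	// Recomputing over the completed header must give zero.
	verify := make([]byte, 2)
	chksum(hdr, verify)
	fmt.Println("verification is zero:", verify[0] == 0 && verify[1] == 0)
}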
eks-distro-build-tooling
aws
Go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
224
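A short usage sketch for the 2Q cache above: new keys land in the recent queue and are promoted to the frequent queue on their next access.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	cache.Add("answer", 42)

	if v, ok := cache.Get("answer"); ok {
		fmt.Println("answer =", v) // this Get promotes the entry to the frequent queue
	}
	fmt.Println("entries:", cache.Len())
}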
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). // ARC is an enhancement over the standard LRU cache in that tracks both // frequency and recency of use. This avoids a burst in access to new // entries from evicting the frequently used older entries. It adds some // additional tracking overhead to a standard LRU cache, computationally // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 t1 simplelru.LRUCache // T1 is the LRU for recently accessed items b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size func NewARC(size int) (*ARCCache, error) { // Create the sub LRUs b1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } b2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } // Initialize the ARC c := &ARCCache{ size: size, p: 0, t1: t1, b1: b1, t2: t2, b2: b2, } return c, nil } // Get looks up a key's value from the cache. func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) c.t2.Add(key, val) return val, ok } // Check if the value is contained in T2 (frequent) if val, ok := c.t2.Get(key); ok { return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *ARCCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is contained in T1 (recent), and potentially // promote it to frequent T2 if c.t1.Contains(key) { c.t1.Remove(key) c.t2.Add(key, value) return } // Check if the value is already in T2 (frequent) and update it if c.t2.Contains(key) { c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // recently used list if c.b1.Contains(key) { // T1 set is too small, increase P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b2Len > b1Len { delta = b2Len / b1Len } if c.p+delta >= c.size { c.p = c.size } else { c.p += delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Remove from B1 c.b1.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // frequently used list if c.b2.Contains(key) { // T2 set is too small, decrease P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b1Len > b2Len { delta = b1Len / b2Len } if delta >= c.p { c.p = 0 } else { c.p -= delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(true) } // Remove from B2 c.b2.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Keep the size of the ghost buffers trim if c.b1.Len() > c.size-c.p { c.b1.RemoveOldest() } if c.b2.Len() > c.p { c.b2.RemoveOldest() } // Add to the recently seen list c.t1.Add(key, value) return } // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P func (c *ARCCache) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { c.b1.Add(k, nil) } } else { k, _, ok := c.t2.RemoveOldest() if ok { c.b2.Add(k, nil) } } } // Len returns the number of cached entries func (c *ARCCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys func (c *ARCCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() k2 := c.t2.Keys() return append(k1, k2...) } // Remove is used to purge a key from the cache func (c *ARCCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { return } if c.t2.Remove(key) { return } if c.b1.Remove(key) { return } if c.b2.Remove(key) { return } } // Purge is used to clear the cache func (c *ARCCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() c.t2.Purge() c.b1.Purge() c.b2.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *ARCCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { return val, ok } return c.t2.Peek(key) }
258
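A short usage sketch for the ARC cache above: Peek inspects an entry without touching the recency/frequency bookkeeping, while Get promotes it from T1 to T2.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}

	cache.Add("k1", "v1")
	cache.Add("k2", "v2")

	// Peek does not influence the adaptive bookkeeping.
	if v, ok := cache.Peek("k1"); ok {
		fmt.Println("peeked:", v)
	}
	// Get on k1 moves it from the recent list (T1) to the frequent list (T2).
	if v, ok := cache.Get("k1"); ok {
		fmt.Println("got:", v)
	}
	fmt.Println("len:", cache.Len())
}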
eks-distro-build-tooling
aws
Go
// Package lru provides three different LRU caches of varying sophistication. // // Cache is a simple LRU cache. It is based on the // LRU implementation in groupcache: // https://github.com/golang/groupcache/tree/master/lru // // TwoQueueCache tracks frequently used and recently used entries separately. // This avoids a burst of accesses from taking out frequently used entries, // at the cost of about 2x computational overhead and some extra bookkeeping. // // ARCCache is an adaptive replacement cache. It tracks recent evictions as // well as recent usage in both the frequent and recent caches. Its // computational overhead is comparable to TwoQueueCache, but the memory // overhead is linear with the size of the cache. // // ARC has been patented by IBM, so do not use it if that is problematic for // your program. // // All caches in this package take locks while operating, and are therefore // thread-safe for consumers. package lru
22
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // PeekOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() previous, ok = c.lru.Peek(key) if ok { return previous, true, false } evicted = c.lru.Add(key, value) return nil, false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) (present bool) { c.lock.Lock() present = c.lru.Remove(key) c.lock.Unlock() return } // Resize changes the cache size. func (c *Cache) Resize(size int) (evicted int) { c.lock.Lock() evicted = c.lru.Resize(size) c.lock.Unlock() return evicted } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) { c.lock.Lock() key, value, ok = c.lru.RemoveOldest() c.lock.Unlock() return } // GetOldest returns the oldest entry func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) { c.lock.Lock() key, value, ok = c.lru.GetOldest() c.lock.Unlock() return } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. 
func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
151
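A sketch of the plain thread-safe LRU with an eviction callback: a size-2 cache, so adding a third key evicts the oldest entry and fires the callback.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	evicted := cache.Add("c", 3) // "a" is the oldest entry, so it is evicted
	fmt.Println("eviction occurred:", evicted)

	_, ok := cache.Get("a")
	fmt.Println("a still cached:", ok) // false
}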
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) if ent.Value.(*entry) == nil { return nil, false } return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // Resize changes the cache size. 
func (c *LRU) Resize(size int) (evicted int) { diff := c.Len() - size if diff < 0 { diff = 0 } for i := 0; i < diff; i++ { c.removeOldest() } c.size = size return diff } // removeOldest removes the oldest item from the cache. func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
178
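simplelru is the unsynchronized core the wrappers above build on, useful when the caller already serializes access. A small sketch of direct use, including Resize, which returns how many entries were evicted to reach the new size.

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	cache, err := simplelru.NewLRU(4, func(key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	})
	if err != nil {
		panic(err)
	}

	for i := 0; i < 4; i++ {
		cache.Add(i, i*i)
	}

	// Shrinking the cache evicts the oldest entries and reports how many
	// were removed.
	removed := cache.Resize(2)
	fmt.Println("removed by resize:", removed) // 2
	fmt.Println("keys now:", cache.Keys())     // oldest to newest: [2 3]
}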
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Checks if a key exists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clears all cache entries. Purge() // Resizes cache, returning number evicted Resize(int) int }
40
eks-distro-build-tooling
aws
Go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
224
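A minimal usage sketch for the TwoQueueCache above, assuming the parent import path github.com/hashicorp/golang-lru (inferred from the file's own import of its simplelru subpackage); the keys, values, and sizes are illustrative only. It shows New2Q with the default ratios, the return-free Add, and Get promoting a recent hit into the frequent list.

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // 128-entry 2Q cache with the default recent/ghost ratios.
    cache, err := lru.New2Q(128)
    if err != nil {
        panic(err)
    }

    // Add does not report evictions; it silently makes room internally.
    for i := 0; i < 256; i++ {
        cache.Add(i, fmt.Sprintf("value-%d", i))
    }

    // Get promotes a hit from the recent list into the frequent list.
    if v, ok := cache.Get(200); ok {
        fmt.Println("hit:", v)
    }
    fmt.Println("entries:", cache.Len()) // bounded by the configured size
}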
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). // ARC is an enhancement over the standard LRU cache in that tracks both // frequency and recency of use. This avoids a burst in access to new // entries from evicting the frequently used older entries. It adds some // additional tracking overhead to a standard LRU cache, computationally // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 t1 simplelru.LRUCache // T1 is the LRU for recently accessed items b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size func NewARC(size int) (*ARCCache, error) { // Create the sub LRUs b1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } b2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } // Initialize the ARC c := &ARCCache{ size: size, p: 0, t1: t1, b1: b1, t2: t2, b2: b2, } return c, nil } // Get looks up a key's value from the cache. func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) c.t2.Add(key, val) return val, ok } // Check if the value is contained in T2 (frequent) if val, ok := c.t2.Get(key); ok { return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *ARCCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is contained in T1 (recent), and potentially // promote it to frequent T2 if c.t1.Contains(key) { c.t1.Remove(key) c.t2.Add(key, value) return } // Check if the value is already in T2 (frequent) and update it if c.t2.Contains(key) { c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // recently used list if c.b1.Contains(key) { // T1 set is too small, increase P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b2Len > b1Len { delta = b2Len / b1Len } if c.p+delta >= c.size { c.p = c.size } else { c.p += delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Remove from B1 c.b1.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // frequently used list if c.b2.Contains(key) { // T2 set is too small, decrease P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b1Len > b2Len { delta = b1Len / b2Len } if delta >= c.p { c.p = 0 } else { c.p -= delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(true) } // Remove from B2 c.b2.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Keep the size of the ghost buffers trim if c.b1.Len() > c.size-c.p { c.b1.RemoveOldest() } if c.b2.Len() > c.p { c.b2.RemoveOldest() } // Add to the recently seen list c.t1.Add(key, value) return } // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P func (c *ARCCache) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { c.b1.Add(k, nil) } } else { k, _, ok := c.t2.RemoveOldest() if ok { c.b2.Add(k, nil) } } } // Len returns the number of cached entries func (c *ARCCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys func (c *ARCCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() k2 := c.t2.Keys() return append(k1, k2...) } // Remove is used to purge a key from the cache func (c *ARCCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { return } if c.t2.Remove(key) { return } if c.b1.Remove(key) { return } if c.b2.Remove(key) { return } } // Purge is used to clear the cache func (c *ARCCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() c.t2.Purge() c.b1.Purge() c.b2.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *ARCCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { return val, ok } return c.t2.Peek(key) }
258
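The ARCCache above needs only a size; the p watermark adapts on its own as ghost-list (B1/B2) hits arrive. A small sketch of the read/write path under the same assumed import path, with made-up keys:

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    arc, err := lru.NewARC(64)
    if err != nil {
        panic(err)
    }

    arc.Add("config", []byte(`{"region":"us-east-1"}`))

    // Get moves a T1 (recent) hit into T2 (frequent); Peek and Contains do not.
    if raw, ok := arc.Get("config"); ok {
        fmt.Printf("%s\n", raw)
    }
    fmt.Println(arc.Contains("config"), arc.Len())
}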
eks-distro-build-tooling
aws
Go
// Package lru provides three different LRU caches of varying sophistication. // // Cache is a simple LRU cache. It is based on the // LRU implementation in groupcache: // https://github.com/golang/groupcache/tree/master/lru // // TwoQueueCache tracks frequently used and recently used entries separately. // This avoids a burst of accesses from taking out frequently used entries, // at the cost of about 2x computational overhead and some extra bookkeeping. // // ARCCache is an adaptive replacement cache. It tracks recent evictions as // well as recent usage in both the frequent and recent caches. Its // computational overhead is comparable to TwoQueueCache, but the memory // overhead is linear with the size of the cache. // // ARC has been patented by IBM, so do not use it if that is problematic for // your program. // // All caches in this package take locks while operating, and are therefore // thread-safe for consumers. package lru
22
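The package doc contrasts the three caches, and all of them share the same basic Add/Get surface, so swapping one for another is largely a constructor change. A hedged illustration under the same assumed import path; the sizes and workload are arbitrary:

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // Same workload against the three cache flavours described above.
    // Constructor errors are ignored here for brevity.
    plain, _ := lru.New(32)  // simple LRU
    twoQ, _ := lru.New2Q(32) // 2Q: separate recent/frequent lists
    arc, _ := lru.NewARC(32) // ARC: adaptive, no tuning parameters

    for i := 0; i < 64; i++ {
        plain.Add(i, i)
        twoQ.Add(i, i)
        arc.Add(i, i)
    }
    // Each cache stays bounded by its configured size.
    fmt.Println(plain.Len(), twoQ.Len(), arc.Len())
}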
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) { c.lock.Lock() c.lru.Remove(key) c.lock.Unlock() } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { c.lock.Lock() c.lru.RemoveOldest() c.lock.Unlock() } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
117
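The thread-safe Cache above is the usual entry point; NewWithEvict threads an eviction callback through to the inner simplelru.LRU, and Add reports whether it caused an eviction. A sketch with illustrative keys, same assumed import path:

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // The callback fires whenever an insert pushes out the oldest entry.
    onEvict := func(key interface{}, value interface{}) {
        fmt.Printf("evicted %v=%v\n", key, value)
    }

    cache, err := lru.NewWithEvict(2, onEvict)
    if err != nil {
        panic(err)
    }

    cache.Add("a", 1)
    cache.Add("b", 2)
    evicted := cache.Add("c", 3) // evicts "a", returns true
    fmt.Println("evicted:", evicted)

    // ContainsOrAdd checks without bumping recency, adding only on a miss.
    found, _ := cache.ContainsOrAdd("b", 20)
    fmt.Println("already present:", found)
}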
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // removeOldest removes the oldest item from the cache. 
func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
162
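simplelru.LRU is the unlocked core: it is explicitly not thread-safe, so callers sharing it across goroutines must add their own locking. A minimal sketch using the exported NewLRU constructor and EvictCallback type; keys and sizes are arbitrary:

package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

func main() {
    // NewLRU rejects non-positive sizes and accepts a nil eviction callback.
    l, err := simplelru.NewLRU(3, func(key interface{}, value interface{}) {
        fmt.Printf("dropping %v\n", key)
    })
    if err != nil {
        panic(err)
    }

    for i := 0; i < 5; i++ {
        l.Add(i, i*i) // the two oldest keys get evicted along the way
    }

    // Keys reports oldest to newest; Peek does not refresh recency.
    fmt.Println(l.Keys())
    if v, ok := l.Peek(4); ok {
        fmt.Println("peeked:", v)
    }
}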
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Checks if a key exists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clears all cache entries. Purge() }
37
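The LRUCache interface is what the thread-safe wrappers hold internally, so any conforming implementation can be swapped in. A small sketch; the compile-time assertion line is an illustrative addition (a common Go idiom), not taken from the vendored source:

package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

// Compile-time check that *simplelru.LRU satisfies the LRUCache interface.
var _ simplelru.LRUCache = (*simplelru.LRU)(nil)

func main() {
    var c simplelru.LRUCache

    l, err := simplelru.NewLRU(2, nil)
    if err != nil {
        panic(err)
    }
    c = l // any LRUCache implementation could be substituted here

    c.Add("x", 1)
    c.Add("y", 2)
    c.Add("z", 3) // evicts "x"
    fmt.Println(c.Contains("x"), c.Len())
}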
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // PeekOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() previous, ok = c.lru.Peek(key) if ok { return previous, true, false } evicted = c.lru.Add(key, value) return nil, false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) (present bool) { c.lock.Lock() present = c.lru.Remove(key) c.lock.Unlock() return } // Resize changes the cache size. func (c *Cache) Resize(size int) (evicted int) { c.lock.Lock() evicted = c.lru.Resize(size) c.lock.Unlock() return evicted } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) { c.lock.Lock() key, value, ok = c.lru.RemoveOldest() c.lock.Unlock() return } // GetOldest returns the oldest entry func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) { c.lock.Lock() key, value, ok = c.lru.GetOldest() c.lock.Unlock() return } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. 
func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
151
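This later revision of the Cache wrapper adds PeekOrAdd, value-returning RemoveOldest/GetOldest, and Resize. A short sketch of the newer calls, same assumed import path and arbitrary keys:

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    cache, err := lru.New(4)
    if err != nil {
        panic(err)
    }
    for i := 0; i < 4; i++ {
        cache.Add(i, i)
    }

    // PeekOrAdd returns the existing value without touching recency,
    // or inserts the new value on a miss.
    prev, ok, _ := cache.PeekOrAdd(2, 99)
    fmt.Println(prev, ok) // 2 true

    // Resize shrinks the cache and reports how many entries were evicted.
    evicted := cache.Resize(2)
    fmt.Println("evicted:", evicted)

    if k, v, ok := cache.GetOldest(); ok {
        fmt.Println("oldest:", k, v)
    }
}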
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) if ent.Value.(*entry) == nil { return nil, false } return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // Resize changes the cache size. 
func (c *LRU) Resize(size int) (evicted int) { diff := c.Len() - size if diff < 0 { diff = 0 } for i := 0; i < diff; i++ { c.removeOldest() } c.size = size return diff } // removeOldest removes the oldest item from the cache. func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
178
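The matching simplelru revision gains Resize, which evicts oldest entries until the new size fits and returns the eviction count. A minimal sketch with arbitrary contents:

package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

func main() {
    l, err := simplelru.NewLRU(5, nil)
    if err != nil {
        panic(err)
    }
    for i := 0; i < 5; i++ {
        l.Add(i, i)
    }

    // Shrinking from 5 to 2 removes the 3 oldest entries (keys 0, 1, 2).
    fmt.Println("evicted:", l.Resize(2))
    fmt.Println("keys:", l.Keys()) // oldest to newest: [3 4]

    // Growing back does not restore anything; it only raises the cap.
    fmt.Println("evicted:", l.Resize(10))
}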
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Checks if a key exists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clears all cache entries. Purge() // Resizes cache, returning number evicted Resize(int) int }
40
eks-distro-build-tooling
aws
Go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
224
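The TwoQueueCache record above exposes the same Add/Get surface as the package's other caches; below is a minimal usage sketch assuming the standard "github.com/hashicorp/golang-lru" import path (the key and value are illustrative only):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Create a 2Q cache with the default recent/ghost ratios.
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	// The first Add places the entry on the recent list.
	cache.Add("answer", 42)

	// A Get promotes the entry from the recent list to the frequent list.
	if v, ok := cache.Get("answer"); ok {
		fmt.Println("hit:", v)
	}
}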
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). // ARC is an enhancement over the standard LRU cache in that tracks both // frequency and recency of use. This avoids a burst in access to new // entries from evicting the frequently used older entries. It adds some // additional tracking overhead to a standard LRU cache, computationally // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 t1 simplelru.LRUCache // T1 is the LRU for recently accessed items b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size func NewARC(size int) (*ARCCache, error) { // Create the sub LRUs b1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } b2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } // Initialize the ARC c := &ARCCache{ size: size, p: 0, t1: t1, b1: b1, t2: t2, b2: b2, } return c, nil } // Get looks up a key's value from the cache. func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) c.t2.Add(key, val) return val, ok } // Check if the value is contained in T2 (frequent) if val, ok := c.t2.Get(key); ok { return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *ARCCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is contained in T1 (recent), and potentially // promote it to frequent T2 if c.t1.Contains(key) { c.t1.Remove(key) c.t2.Add(key, value) return } // Check if the value is already in T2 (frequent) and update it if c.t2.Contains(key) { c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // recently used list if c.b1.Contains(key) { // T1 set is too small, increase P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b2Len > b1Len { delta = b2Len / b1Len } if c.p+delta >= c.size { c.p = c.size } else { c.p += delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Remove from B1 c.b1.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // frequently used list if c.b2.Contains(key) { // T2 set is too small, decrease P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b1Len > b2Len { delta = b1Len / b2Len } if delta >= c.p { c.p = 0 } else { c.p -= delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(true) } // Remove from B2 c.b2.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Keep the size of the ghost buffers trim if c.b1.Len() > c.size-c.p { c.b1.RemoveOldest() } if c.b2.Len() > c.p { c.b2.RemoveOldest() } // Add to the recently seen list c.t1.Add(key, value) return } // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P func (c *ARCCache) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { c.b1.Add(k, nil) } } else { k, _, ok := c.t2.RemoveOldest() if ok { c.b2.Add(k, nil) } } } // Len returns the number of cached entries func (c *ARCCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys func (c *ARCCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() k2 := c.t2.Keys() return append(k1, k2...) } // Remove is used to purge a key from the cache func (c *ARCCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { return } if c.t2.Remove(key) { return } if c.b1.Remove(key) { return } if c.b2.Remove(key) { return } } // Purge is used to clear the cache func (c *ARCCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() c.t2.Purge() c.b1.Purge() c.b2.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *ARCCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { return val, ok } return c.t2.Peek(key) }
258
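Unlike 2Q, the ARCCache above needs only a size because the recent/frequent split (p) adapts as the workload runs; a minimal sketch, with illustrative keys and values:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}

	cache.Add("k1", "v1")
	cache.Add("k2", "v2")

	// A hit in T1 (recent) moves the entry to T2 (frequent).
	if v, ok := cache.Get("k1"); ok {
		fmt.Println("hit:", v)
	}
	fmt.Println("entries:", cache.Len())
}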
eks-distro-build-tooling
aws
Go
// Package lru provides three different LRU caches of varying sophistication. // // Cache is a simple LRU cache. It is based on the // LRU implementation in groupcache: // https://github.com/golang/groupcache/tree/master/lru // // TwoQueueCache tracks frequently used and recently used entries separately. // This avoids a burst of accesses from taking out frequently used entries, // at the cost of about 2x computational overhead and some extra bookkeeping. // // ARCCache is an adaptive replacement cache. It tracks recent evictions as // well as recent usage in both the frequent and recent caches. Its // computational overhead is comparable to TwoQueueCache, but the memory // overhead is linear with the size of the cache. // // ARC has been patented by IBM, so do not use it if that is problematic for // your program. // // All caches in this package take locks while operating, and are therefore // thread-safe for consumers. package lru
22
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) { c.lock.Lock() c.lru.Remove(key) c.lock.Unlock() } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { c.lock.Lock() c.lru.RemoveOldest() c.lock.Unlock() } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
117
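The simple Cache above is the thread-safe wrapper most callers use; the sketch below shows the eviction callback and ContainsOrAdd from that file (the cache size and entries are illustrative only):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewWithEvict registers a callback that fires whenever an entry is evicted.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)

	// Adding a third entry to a size-2 cache evicts the oldest ("a").
	evicted := cache.Add("c", 3)
	fmt.Println("eviction occurred:", evicted)

	// ContainsOrAdd checks without touching recency, adding only when absent.
	found, _ := cache.ContainsOrAdd("b", 20)
	fmt.Println("already present:", found)
}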
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // removeOldest removes the oldest item from the cache. 
func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
162
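For single-goroutine use, the non-thread-safe simplelru implementation above can be used directly without the locking wrapper; a minimal sketch, with illustrative entries:

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// NewLRU takes a size and an optional eviction callback; pass nil to disable it.
	l, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	})
	if err != nil {
		panic(err)
	}

	l.Add("x", 1)
	l.Add("y", 2)
	l.Add("z", 3) // evicts "x", the least recently used entry

	if k, v, ok := l.GetOldest(); ok {
		fmt.Println("oldest:", k, v)
	}
	fmt.Println("keys oldest-to-newest:", l.Keys())
}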
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Checks if a key exists in the cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from the cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clears all cache entries. Purge() }
37
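Because the thread-safe wrappers all sit on top of a simplelru.LRUCache, code can be written against the interface above rather than a concrete cache; the warm helper below is a hypothetical sketch, not part of the library:

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

// warm is a hypothetical helper that preloads any LRUCache implementation.
func warm(c simplelru.LRUCache, seed map[interface{}]interface{}) {
	for k, v := range seed {
		c.Add(k, v)
	}
}

func main() {
	l, err := simplelru.NewLRU(8, nil)
	if err != nil {
		panic(err)
	}
	warm(l, map[interface{}]interface{}{"a": 1, "b": 2})
	fmt.Println("cached items:", l.Len())
}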
eks-distro-build-tooling
aws
Go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
224
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). // ARC is an enhancement over the standard LRU cache in that tracks both // frequency and recency of use. This avoids a burst in access to new // entries from evicting the frequently used older entries. It adds some // additional tracking overhead to a standard LRU cache, computationally // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 t1 simplelru.LRUCache // T1 is the LRU for recently accessed items b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size func NewARC(size int) (*ARCCache, error) { // Create the sub LRUs b1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } b2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } // Initialize the ARC c := &ARCCache{ size: size, p: 0, t1: t1, b1: b1, t2: t2, b2: b2, } return c, nil } // Get looks up a key's value from the cache. func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) c.t2.Add(key, val) return val, ok } // Check if the value is contained in T2 (frequent) if val, ok := c.t2.Get(key); ok { return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *ARCCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is contained in T1 (recent), and potentially // promote it to frequent T2 if c.t1.Contains(key) { c.t1.Remove(key) c.t2.Add(key, value) return } // Check if the value is already in T2 (frequent) and update it if c.t2.Contains(key) { c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // recently used list if c.b1.Contains(key) { // T1 set is too small, increase P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b2Len > b1Len { delta = b2Len / b1Len } if c.p+delta >= c.size { c.p = c.size } else { c.p += delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Remove from B1 c.b1.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // frequently used list if c.b2.Contains(key) { // T2 set is too small, decrease P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b1Len > b2Len { delta = b1Len / b2Len } if delta >= c.p { c.p = 0 } else { c.p -= delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(true) } // Remove from B2 c.b2.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Keep the size of the ghost buffers trim if c.b1.Len() > c.size-c.p { c.b1.RemoveOldest() } if c.b2.Len() > c.p { c.b2.RemoveOldest() } // Add to the recently seen list c.t1.Add(key, value) return } // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P func (c *ARCCache) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { c.b1.Add(k, nil) } } else { k, _, ok := c.t2.RemoveOldest() if ok { c.b2.Add(k, nil) } } } // Len returns the number of cached entries func (c *ARCCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys func (c *ARCCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() k2 := c.t2.Keys() return append(k1, k2...) } // Remove is used to purge a key from the cache func (c *ARCCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { return } if c.t2.Remove(key) { return } if c.b1.Remove(key) { return } if c.b2.Remove(key) { return } } // Purge is used to clear the cache func (c *ARCCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() c.t2.Purge() c.b1.Purge() c.b2.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *ARCCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { return val, ok } return c.t2.Peek(key) }
258
eks-distro-build-tooling
aws
Go
// Package lru provides three different LRU caches of varying sophistication. // // Cache is a simple LRU cache. It is based on the // LRU implementation in groupcache: // https://github.com/golang/groupcache/tree/master/lru // // TwoQueueCache tracks frequently used and recently used entries separately. // This avoids a burst of accesses from taking out frequently used entries, // at the cost of about 2x computational overhead and some extra bookkeeping. // // ARCCache is an adaptive replacement cache. It tracks recent evictions as // well as recent usage in both the frequent and recent caches. Its // computational overhead is comparable to TwoQueueCache, but the memory // overhead is linear with the size of the cache. // // ARC has been patented by IBM, so do not use it if that is problematic for // your program. // // All caches in this package take locks while operating, and are therefore // thread-safe for consumers. package lru
22
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) { c.lock.Lock() c.lru.Remove(key) c.lock.Unlock() } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { c.lock.Lock() c.lru.RemoveOldest() c.lock.Unlock() } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
117
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // removeOldest removes the oldest item from the cache. 
func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
162
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Check if a key exsists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clear all cache entries Purge() }
37
eks-distro-build-tooling
aws
Go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
224
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). // ARC is an enhancement over the standard LRU cache in that tracks both // frequency and recency of use. This avoids a burst in access to new // entries from evicting the frequently used older entries. It adds some // additional tracking overhead to a standard LRU cache, computationally // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 t1 simplelru.LRUCache // T1 is the LRU for recently accessed items b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size func NewARC(size int) (*ARCCache, error) { // Create the sub LRUs b1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } b2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } // Initialize the ARC c := &ARCCache{ size: size, p: 0, t1: t1, b1: b1, t2: t2, b2: b2, } return c, nil } // Get looks up a key's value from the cache. func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) c.t2.Add(key, val) return val, ok } // Check if the value is contained in T2 (frequent) if val, ok := c.t2.Get(key); ok { return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *ARCCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is contained in T1 (recent), and potentially // promote it to frequent T2 if c.t1.Contains(key) { c.t1.Remove(key) c.t2.Add(key, value) return } // Check if the value is already in T2 (frequent) and update it if c.t2.Contains(key) { c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // recently used list if c.b1.Contains(key) { // T1 set is too small, increase P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b2Len > b1Len { delta = b2Len / b1Len } if c.p+delta >= c.size { c.p = c.size } else { c.p += delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Remove from B1 c.b1.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // frequently used list if c.b2.Contains(key) { // T2 set is too small, decrease P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b1Len > b2Len { delta = b1Len / b2Len } if delta >= c.p { c.p = 0 } else { c.p -= delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(true) } // Remove from B2 c.b2.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Keep the size of the ghost buffers trim if c.b1.Len() > c.size-c.p { c.b1.RemoveOldest() } if c.b2.Len() > c.p { c.b2.RemoveOldest() } // Add to the recently seen list c.t1.Add(key, value) return } // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P func (c *ARCCache) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { c.b1.Add(k, nil) } } else { k, _, ok := c.t2.RemoveOldest() if ok { c.b2.Add(k, nil) } } } // Len returns the number of cached entries func (c *ARCCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys func (c *ARCCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() k2 := c.t2.Keys() return append(k1, k2...) } // Remove is used to purge a key from the cache func (c *ARCCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { return } if c.t2.Remove(key) { return } if c.b1.Remove(key) { return } if c.b2.Remove(key) { return } } // Purge is used to clear the cache func (c *ARCCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() c.t2.Purge() c.b1.Purge() c.b2.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *ARCCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { return val, ok } return c.t2.Peek(key) }
258
eks-distro-build-tooling
aws
Go
// Package lru provides three different LRU caches of varying sophistication. // // Cache is a simple LRU cache. It is based on the // LRU implementation in groupcache: // https://github.com/golang/groupcache/tree/master/lru // // TwoQueueCache tracks frequently used and recently used entries separately. // This avoids a burst of accesses from taking out frequently used entries, // at the cost of about 2x computational overhead and some extra bookkeeping. // // ARCCache is an adaptive replacement cache. It tracks recent evictions as // well as recent usage in both the frequent and recent caches. Its // computational overhead is comparable to TwoQueueCache, but the memory // overhead is linear with the size of the cache. // // ARC has been patented by IBM, so do not use it if that is problematic for // your program. // // All caches in this package take locks while operating, and are therefore // thread-safe for consumers. package lru
22
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) { c.lock.Lock() c.lru.Remove(key) c.lock.Unlock() } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { c.lock.Lock() c.lru.RemoveOldest() c.lock.Unlock() } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
117
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // removeOldest removes the oldest item from the cache. 
func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
162
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Check if a key exsists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clear all cache entries Purge() }
37
eks-distro-build-tooling
aws
Go
package hcl import ( "errors" "fmt" "reflect" "sort" "strconv" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/parser" "github.com/hashicorp/hcl/hcl/token" ) // This is the tag to use with structures to have settings for HCL const tagName = "hcl" var ( // nodeType holds a reference to the type of ast.Node nodeType reflect.Type = findNodeType() ) // Unmarshal accepts a byte slice as input and writes the // data to the value pointed to by v. func Unmarshal(bs []byte, v interface{}) error { root, err := parse(bs) if err != nil { return err } return DecodeObject(v, root) } // Decode reads the given input and decodes it into the structure // given by `out`. func Decode(out interface{}, in string) error { obj, err := Parse(in) if err != nil { return err } return DecodeObject(out, obj) } // DecodeObject is a lower-level version of Decode. It decodes a // raw Object into the given output. func DecodeObject(out interface{}, n ast.Node) error { val := reflect.ValueOf(out) if val.Kind() != reflect.Ptr { return errors.New("result must be a pointer") } // If we have the file, we really decode the root node if f, ok := n.(*ast.File); ok { n = f.Node } var d decoder return d.decode("root", n, val.Elem()) } type decoder struct { stack []reflect.Kind } func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { k := result // If we have an interface with a valid value, we use that // for the check. if result.Kind() == reflect.Interface { elem := result.Elem() if elem.IsValid() { k = elem } } // Push current onto stack unless it is an interface. if k.Kind() != reflect.Interface { d.stack = append(d.stack, k.Kind()) // Schedule a pop defer func() { d.stack = d.stack[:len(d.stack)-1] }() } switch k.Kind() { case reflect.Bool: return d.decodeBool(name, node, result) case reflect.Float32, reflect.Float64: return d.decodeFloat(name, node, result) case reflect.Int, reflect.Int32, reflect.Int64: return d.decodeInt(name, node, result) case reflect.Interface: // When we see an interface, we make our own thing return d.decodeInterface(name, node, result) case reflect.Map: return d.decodeMap(name, node, result) case reflect.Ptr: return d.decodePtr(name, node, result) case reflect.Slice: return d.decodeSlice(name, node, result) case reflect.String: return d.decodeString(name, node, result) case reflect.Struct: return d.decodeStruct(name, node, result) default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), } } } func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: if n.Token.Type == token.BOOL { v, err := strconv.ParseBool(n.Token.Text) if err != nil { return err } result.Set(reflect.ValueOf(v)) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { v, err := strconv.ParseFloat(n.Token.Text, 64) if err != nil { return err } result.Set(reflect.ValueOf(v).Convert(result.Type())) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { case token.NUMBER: v, err 
:= strconv.ParseInt(n.Token.Text, 0, 0) if err != nil { return err } if result.Kind() == reflect.Interface { result.Set(reflect.ValueOf(int(v))) } else { result.SetInt(v) } return nil case token.STRING: v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) if err != nil { return err } if result.Kind() == reflect.Interface { result.Set(reflect.ValueOf(int(v))) } else { result.SetInt(v) } return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { // When we see an ast.Node, we retain the value to enable deferred decoding. // Very useful in situations where we want to preserve ast.Node information // like Pos if result.Type() == nodeType && result.CanSet() { result.Set(reflect.ValueOf(node)) return nil } var set reflect.Value redecode := true // For testing types, ObjectType should just be treated as a list. We // set this to a temporary var because we want to pass in the real node. testNode := node if ot, ok := node.(*ast.ObjectType); ok { testNode = ot.List } switch n := testNode.(type) { case *ast.ObjectList: // If we're at the root or we're directly within a slice, then we // decode objects into map[string]interface{}, otherwise we decode // them into lists. if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { var temp map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeMap( reflect.MapOf( reflect.TypeOf(""), tempVal.Type().Elem())) set = result } else { var temp []map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) set = result } case *ast.ObjectType: // If we're at the root or we're directly within a slice, then we // decode objects into map[string]interface{}, otherwise we decode // them into lists. if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { var temp map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeMap( reflect.MapOf( reflect.TypeOf(""), tempVal.Type().Elem())) set = result } else { var temp []map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, 1) set = result } case *ast.ListType: var temp []interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, 0) set = result case *ast.LiteralType: switch n.Token.Type { case token.BOOL: var result bool set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.FLOAT: var result float64 set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.NUMBER: var result int set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.STRING, token.HEREDOC: set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), } } default: return fmt.Errorf( "%s: cannot decode into interface: %T", name, node) } // Set the result to what its supposed to be, then reset // result so we don't reflect into this method anymore. result.Set(set) if redecode { // Revisit the node so that we can use the newly instantiated // thing and populate it. 
if err := d.decode(name, node, result); err != nil { return err } } return nil } func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { if item, ok := node.(*ast.ObjectItem); ok { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } if ot, ok := node.(*ast.ObjectType); ok { node = ot.List } n, ok := node.(*ast.ObjectList) if !ok { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), } } // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result if result.Kind() == reflect.Interface { result = result.Elem() } resultType := result.Type() resultElemType := resultType.Elem() resultKeyType := resultType.Key() if resultKeyType.Kind() != reflect.String { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: map must have string keys", name), } } // Make a map if it is nil resultMap := result if result.IsNil() { resultMap = reflect.MakeMap( reflect.MapOf(resultKeyType, resultElemType)) } // Go through each element and decode it. done := make(map[string]struct{}) for _, item := range n.Items { if item.Val == nil { continue } // github.com/hashicorp/terraform/issue/5740 if len(item.Keys) == 0 { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: map must have string keys", name), } } // Get the key we're dealing with, which is the first item keyStr := item.Keys[0].Token.Value().(string) // If we've already processed this key, then ignore it if _, ok := done[keyStr]; ok { continue } // Determine the value. If we have more than one key, then we // get the objectlist of only these keys. itemVal := item.Val if len(item.Keys) > 1 { itemVal = n.Filter(keyStr) done[keyStr] = struct{}{} } // Make the field name fieldName := fmt.Sprintf("%s.%s", name, keyStr) // Get the key/value as reflection values key := reflect.ValueOf(keyStr) val := reflect.Indirect(reflect.New(resultElemType)) // If we have a pre-existing value in the map, use that oldVal := resultMap.MapIndex(key) if oldVal.IsValid() { val.Set(oldVal) } // Decode! if err := d.decode(fieldName, itemVal, val); err != nil { return err } // Set the value on the map resultMap.SetMapIndex(key, val) } // Set the final map if we can set.Set(resultMap) return nil } func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
resultType := result.Type() resultElemType := resultType.Elem() val := reflect.New(resultElemType) if err := d.decode(name, node, reflect.Indirect(val)); err != nil { return err } result.Set(val) return nil } func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result if result.Kind() == reflect.Interface { result = result.Elem() } // Create the slice if it isn't nil resultType := result.Type() resultElemType := resultType.Elem() if result.IsNil() { resultSliceType := reflect.SliceOf(resultElemType) result = reflect.MakeSlice( resultSliceType, 0, 0) } // Figure out the items we'll be copying into the slice var items []ast.Node switch n := node.(type) { case *ast.ObjectList: items = make([]ast.Node, len(n.Items)) for i, item := range n.Items { items[i] = item } case *ast.ObjectType: items = []ast.Node{n} case *ast.ListType: items = n.List default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("unknown slice type: %T", node), } } for i, item := range items { fieldName := fmt.Sprintf("%s[%d]", name, i) // Decode val := reflect.Indirect(reflect.New(resultElemType)) // if item is an object that was decoded from ambiguous JSON and // flattened, make sure it's expanded if it needs to decode into a // defined structure. item := expandObject(item, val) if err := d.decode(fieldName, item, val); err != nil { return err } // Append it onto the slice result = reflect.Append(result, val) } set.Set(result) return nil } // expandObject detects if an ambiguous JSON object was flattened to a List which // should be decoded into a struct, and expands the ast to properly deocode. func expandObject(node ast.Node, result reflect.Value) ast.Node { item, ok := node.(*ast.ObjectItem) if !ok { return node } elemType := result.Type() // our target type must be a struct switch elemType.Kind() { case reflect.Ptr: switch elemType.Elem().Kind() { case reflect.Struct: //OK default: return node } case reflect.Struct: //OK default: return node } // A list value will have a key and field name. If it had more fields, // it wouldn't have been flattened. if len(item.Keys) != 2 { return node } keyToken := item.Keys[0].Token item.Keys = item.Keys[1:] // we need to un-flatten the ast enough to decode newNode := &ast.ObjectItem{ Keys: []*ast.ObjectKey{ &ast.ObjectKey{ Token: keyToken, }, }, Val: &ast.ObjectType{ List: &ast.ObjectList{ Items: []*ast.ObjectItem{item}, }, }, } return newNode } func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { case token.NUMBER: result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) return nil case token.STRING, token.HEREDOC: result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type for string %T", name, node), } } func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { var item *ast.ObjectItem if it, ok := node.(*ast.ObjectItem); ok { item = it node = it.Val } if ot, ok := node.(*ast.ObjectType); ok { node = ot.List } // Handle the special case where the object itself is a literal. Previously // the yacc parser would always ensure top-level elements were arrays. 
The new // parser does not make the same guarantees, thus we need to convert any // top-level literal elements into a list. if _, ok := node.(*ast.LiteralType); ok && item != nil { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } list, ok := node.(*ast.ObjectList) if !ok { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), } } // This slice will keep track of all the structs we'll be decoding. // There can be more than one struct if there are embedded structs // that are squashed. structs := make([]reflect.Value, 1, 5) structs[0] = result // Compile the list of all the fields that we're going to be decoding // from all the structs. type field struct { field reflect.StructField val reflect.Value } fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] structType := structVal.Type() for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") // Ignore fields with tag name "-" if tagParts[0] == "-" { continue } if fieldType.Anonymous { fieldKind := fieldType.Type.Kind() if fieldKind != reflect.Struct { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unsupported type to struct: %s", fieldType.Name, fieldKind), } } // We have an embedded field. We "squash" the fields down // if specified in the tag. squash := false for _, tag := range tagParts[1:] { if tag == "squash" { squash = true break } } if squash { structs = append( structs, result.FieldByName(fieldType.Name)) continue } } // Normal struct field, store it away fields = append(fields, field{fieldType, structVal.Field(i)}) } } usedKeys := make(map[string]struct{}) decodedFields := make([]string, 0, len(fields)) decodedFieldsVal := make([]reflect.Value, 0) unusedKeysVal := make([]reflect.Value, 0) for _, f := range fields { field, fieldValue := f.field, f.val if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. if !fieldValue.CanSet() { continue } fieldName := field.Name tagValue := field.Tag.Get(tagName) tagParts := strings.SplitN(tagValue, ",", 2) if len(tagParts) >= 2 { switch tagParts[1] { case "decodedFields": decodedFieldsVal = append(decodedFieldsVal, fieldValue) continue case "key": if item == nil { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: %s asked for 'key', impossible", name, fieldName), } } fieldValue.SetString(item.Keys[0].Token.Value().(string)) continue case "unusedKeys": unusedKeysVal = append(unusedKeysVal, fieldValue) continue } } if tagParts[0] != "" { fieldName = tagParts[0] } // Determine the element we'll use to decode. If it is a single // match (only object with the field), then we decode it exactly. // If it is a prefix match, then we decode the matches. filter := list.Filter(fieldName) prefixMatches := filter.Children() matches := filter.Elem() if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { continue } // Track the used key usedKeys[fieldName] = struct{}{} // Create the field name and decode. We range over the elements // because we actually want the value. 
fieldName = fmt.Sprintf("%s.%s", name, fieldName) if len(prefixMatches.Items) > 0 { if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { return err } } for _, match := range matches.Items { var decodeNode ast.Node = match.Val if ot, ok := decodeNode.(*ast.ObjectType); ok { decodeNode = &ast.ObjectList{Items: ot.List.Items} } if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { return err } } decodedFields = append(decodedFields, field.Name) } if len(decodedFieldsVal) > 0 { // Sort it so that it is deterministic sort.Strings(decodedFields) for _, v := range decodedFieldsVal { v.Set(reflect.ValueOf(decodedFields)) } } return nil } // findNodeType returns the type of ast.Node func findNodeType() reflect.Type { var nodeContainer struct { Node ast.Node } value := reflect.ValueOf(nodeContainer).FieldByName("Node") return value.Type() }
730
eks-distro-build-tooling
aws
Go
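The decoder above maps HCL keys to struct fields through the "hcl" struct tag (with the ",squash", ",key", ",decodedFields" and ",unusedKeys" modifiers handled in decodeStruct). A minimal usage sketch follows, assuming the upstream github.com/hashicorp/hcl import path; the Config type, its fields and the input text are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type; the "hcl" tags map HCL keys to
// struct fields, as handled by decodeStruct above.
type Config struct {
	Region  string            `hcl:"region"`
	Retries int               `hcl:"retries"`
	Tags    map[string]string `hcl:"tags"`
}

func main() {
	input := `
region  = "us-west-2"
retries = 3
tags {
  team = "eks"
}
`
	var cfg Config
	// Decode parses the text and then runs DecodeObject against the root node.
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}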
// Package hcl decodes HCL into usable Go structures. // // hcl input can come in either pure HCL format or JSON format. // It can be parsed into an AST, and then decoded into a structure, // or it can be decoded directly from a string into a structure. // // If you choose to parse HCL into a raw AST, the benefit is that you // can write custom visitor implementations to implement custom // semantic checks. By default, HCL does not perform any semantic // checks. package hcl
12
eks-distro-build-tooling
aws
Go
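The package comment above describes the two-step path: parse to an AST first (so custom semantic checks could run against it), then decode. A small sketch of that flow, assuming the same import path; the input string and the anonymous output struct are made up for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	src := `name = "eks-distro"`

	// Step 1: parse to an AST, where custom semantic checks could be applied.
	file, err := hcl.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: decode the AST into a Go value.
	var out struct {
		Name string `hcl:"name"`
	}
	if err := hcl.DecodeObject(&out, file); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Name)
}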
package hcl import ( "unicode" "unicode/utf8" ) type lexModeValue byte const ( lexModeUnknown lexModeValue = iota lexModeHcl lexModeJson ) // lexMode returns whether we're going to be parsing in JSON // mode or HCL mode. func lexMode(v []byte) lexModeValue { var ( r rune w int offset int ) for { r, w = utf8.DecodeRune(v[offset:]) offset += w if unicode.IsSpace(r) { continue } if r == '{' { return lexModeJson } break } return lexModeHcl }
39
eks-distro-build-tooling
aws
Go
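lexMode is unexported, so the sketch below only mirrors its rule for illustration: skip leading whitespace and treat input whose first significant rune is '{' as JSON, everything else as HCL. detectJSON is a hypothetical stand-in, not part of the library.

package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// detectJSON mirrors the unexported lexMode above: skip leading whitespace
// and treat input whose first significant rune is '{' as JSON.
func detectJSON(v []byte) bool {
	for offset := 0; offset < len(v); {
		r, w := utf8.DecodeRune(v[offset:])
		offset += w
		if unicode.IsSpace(r) {
			continue
		}
		return r == '{'
	}
	return false
}

func main() {
	fmt.Println(detectJSON([]byte(`  {"a": 1}`))) // true  -> JSON parser
	fmt.Println(detectJSON([]byte(`a = 1`)))      // false -> HCL parser
}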
package hcl import ( "fmt" "github.com/hashicorp/hcl/hcl/ast" hclParser "github.com/hashicorp/hcl/hcl/parser" jsonParser "github.com/hashicorp/hcl/json/parser" ) // ParseBytes accepts as input byte slice and returns ast tree. // // Input can be either JSON or HCL func ParseBytes(in []byte) (*ast.File, error) { return parse(in) } // ParseString accepts input as a string and returns ast tree. func ParseString(input string) (*ast.File, error) { return parse([]byte(input)) } func parse(in []byte) (*ast.File, error) { switch lexMode(in) { case lexModeHcl: return hclParser.Parse(in) case lexModeJson: return jsonParser.Parse(in) } return nil, fmt.Errorf("unknown config format") } // Parse parses the given input and returns the root object. // // The input format can be either HCL or JSON. func Parse(input string) (*ast.File, error) { return parse([]byte(input)) }
40
eks-distro-build-tooling
aws
Go
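Because parse() dispatches on lexMode, the same entry point accepts either syntax. A short sketch with made-up HCL and JSON snippets, assuming the upstream import path.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	// Both inputs go through parse(), which picks the HCL or JSON parser
	// based on lexMode.
	inputs := [][]byte{
		[]byte(`count = 2`),    // HCL
		[]byte(`{"count": 2}`), // JSON
	}
	for _, src := range inputs {
		file, err := hcl.ParseBytes(src)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("root node type: %T\n", file.Node)
	}
}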
// Package ast declares the types used to represent syntax trees for HCL // (HashiCorp Configuration Language) package ast import ( "fmt" "strings" "github.com/hashicorp/hcl/hcl/token" ) // Node is an element in the abstract syntax tree. type Node interface { node() Pos() token.Pos } func (File) node() {} func (ObjectList) node() {} func (ObjectKey) node() {} func (ObjectItem) node() {} func (Comment) node() {} func (CommentGroup) node() {} func (ObjectType) node() {} func (LiteralType) node() {} func (ListType) node() {} // File represents a single HCL file type File struct { Node Node // usually a *ObjectList Comments []*CommentGroup // list of all comments in the source } func (f *File) Pos() token.Pos { return f.Node.Pos() } // ObjectList represents a list of ObjectItems. An HCL file itself is an // ObjectList. type ObjectList struct { Items []*ObjectItem } func (o *ObjectList) Add(item *ObjectItem) { o.Items = append(o.Items, item) } // Filter filters out the objects with the given key list as a prefix. // // The returned list of objects contain ObjectItems where the keys have // this prefix already stripped off. This might result in objects with // zero-length key lists if they have no children. // // If no matches are found, an empty ObjectList (non-nil) is returned. func (o *ObjectList) Filter(keys ...string) *ObjectList { var result ObjectList for _, item := range o.Items { // If there aren't enough keys, then ignore this if len(item.Keys) < len(keys) { continue } match := true for i, key := range item.Keys[:len(keys)] { key := key.Token.Value().(string) if key != keys[i] && !strings.EqualFold(key, keys[i]) { match = false break } } if !match { continue } // Strip off the prefix from the children newItem := *item newItem.Keys = newItem.Keys[len(keys):] result.Add(&newItem) } return &result } // Children returns further nested objects (key length > 0) within this // ObjectList. This should be used with Filter to get at child items. func (o *ObjectList) Children() *ObjectList { var result ObjectList for _, item := range o.Items { if len(item.Keys) > 0 { result.Add(item) } } return &result } // Elem returns items in the list that are direct element assignments // (key length == 0). This should be used with Filter to get at elements. func (o *ObjectList) Elem() *ObjectList { var result ObjectList for _, item := range o.Items { if len(item.Keys) == 0 { result.Add(item) } } return &result } func (o *ObjectList) Pos() token.Pos { // always returns the uninitiliazed position return o.Items[0].Pos() } // ObjectItem represents a HCL Object Item. An item is represented with a key // (or keys). It can be an assignment or an object (both normal and nested) type ObjectItem struct { // keys is only one length long if it's of type assignment. If it's a // nested object it can be larger than one. In that case "assign" is // invalid as there is no assignments for a nested object. Keys []*ObjectKey // assign contains the position of "=", if any Assign token.Pos // val is the item itself. It can be an object,list, number, bool or a // string. If key length is larger than one, val can be only of type // Object. Val Node LeadComment *CommentGroup // associated lead comment LineComment *CommentGroup // associated line comment } func (o *ObjectItem) Pos() token.Pos { // I'm not entirely sure what causes this, but removing this causes // a test failure. We should investigate at some point. 
if len(o.Keys) == 0 { return token.Pos{} } return o.Keys[0].Pos() } // ObjectKeys are either an identifier or of type string. type ObjectKey struct { Token token.Token } func (o *ObjectKey) Pos() token.Pos { return o.Token.Pos } // LiteralType represents a literal of basic type. Valid types are: // token.NUMBER, token.FLOAT, token.BOOL and token.STRING type LiteralType struct { Token token.Token // comment types, only used when in a list LeadComment *CommentGroup LineComment *CommentGroup } func (l *LiteralType) Pos() token.Pos { return l.Token.Pos } // ListStatement represents a HCL List type type ListType struct { Lbrack token.Pos // position of "[" Rbrack token.Pos // position of "]" List []Node // the elements in lexical order } func (l *ListType) Pos() token.Pos { return l.Lbrack } func (l *ListType) Add(node Node) { l.List = append(l.List, node) } // ObjectType represents a HCL Object Type type ObjectType struct { Lbrace token.Pos // position of "{" Rbrace token.Pos // position of "}" List *ObjectList // the nodes in lexical order } func (o *ObjectType) Pos() token.Pos { return o.Lbrace } // Comment node represents a single //, # style or /*- style commment type Comment struct { Start token.Pos // position of / or # Text string } func (c *Comment) Pos() token.Pos { return c.Start } // CommentGroup node represents a sequence of comments with no other tokens and // no empty lines between. type CommentGroup struct { List []*Comment // len(List) > 0 } func (c *CommentGroup) Pos() token.Pos { return c.List[0].Pos() } //------------------------------------------------------------------- // GoStringer //------------------------------------------------------------------- func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
220
eks-distro-build-tooling
aws
Go
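Filter, Children and Elem are the usual way to slice an ObjectList by key prefix. A sketch assuming the upstream import paths; the service blocks in the input are invented for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
)

func main() {
	src := `
service "web" {
  port = 80
}
service "db" {
  port = 5432
}
`
	file, err := hcl.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	list, ok := file.Node.(*ast.ObjectList)
	if !ok {
		log.Fatal("root node is not an *ast.ObjectList")
	}

	// Filter strips the "service" prefix; Children keeps only the items
	// that still have keys left (here, the quoted service names).
	for _, item := range list.Filter("service").Children().Items {
		fmt.Println("service:", item.Keys[0].Token.Value())
	}
}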
package ast import "fmt" // WalkFunc describes a function to be called for each node during a Walk. The // returned node can be used to rewrite the AST. Walking stops the returned // bool is false. type WalkFunc func(Node) (Node, bool) // Walk traverses an AST in depth-first order: It starts by calling fn(node); // node must not be nil. If fn returns true, Walk invokes fn recursively for // each of the non-nil children of node, followed by a call of fn(nil). The // returned node of fn can be used to rewrite the passed node to fn. func Walk(node Node, fn WalkFunc) Node { rewritten, ok := fn(node) if !ok { return rewritten } switch n := node.(type) { case *File: n.Node = Walk(n.Node, fn) case *ObjectList: for i, item := range n.Items { n.Items[i] = Walk(item, fn).(*ObjectItem) } case *ObjectKey: // nothing to do case *ObjectItem: for i, k := range n.Keys { n.Keys[i] = Walk(k, fn).(*ObjectKey) } if n.Val != nil { n.Val = Walk(n.Val, fn) } case *LiteralType: // nothing to do case *ListType: for i, l := range n.List { n.List[i] = Walk(l, fn) } case *ObjectType: n.List = Walk(n.List, fn).(*ObjectList) default: // should we panic here? fmt.Printf("unknown type: %T\n", n) } fn(nil) return rewritten }
53
eks-distro-build-tooling
aws
Go
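A minimal sketch of Walk used as a read-only visitor (it can also rewrite nodes via the returned Node); the input and the literal-printing callback are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
)

func main() {
	file, err := hcl.Parse(`a = 1
b = "two"`)
	if err != nil {
		log.Fatal(err)
	}

	// Walk visits every node depth-first; returning (n, true) keeps walking,
	// and the returned node could be used to rewrite the tree in place.
	ast.Walk(file, func(n ast.Node) (ast.Node, bool) {
		if lit, ok := n.(*ast.LiteralType); ok {
			fmt.Println("literal token:", lit.Token.Text)
		}
		return n, true
	})
}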
package parser import ( "fmt" "github.com/hashicorp/hcl/hcl/token" ) // PosError is a parse error that contains a position. type PosError struct { Pos token.Pos Err error } func (e *PosError) Error() string { return fmt.Sprintf("At %s: %s", e.Pos, e.Err) }
18
eks-distro-build-tooling
aws
Go
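Decode errors from this package generally surface as *parser.PosError values, so callers can recover the offending position. A sketch, assuming the upstream import paths; the struct, field and input are hypothetical.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
	hclparser "github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Decoding a string value into a bool field fails; the decoder wraps the
	// failure in a *parser.PosError that carries the offending position.
	var out struct {
		Enabled bool `hcl:"enabled"`
	}
	err := hcl.Decode(&out, `enabled = "yes"`)

	if posErr, ok := err.(*hclparser.PosError); ok {
		fmt.Printf("error at line %d, column %d: %v\n",
			posErr.Pos.Line, posErr.Pos.Column, posErr.Err)
	} else if err != nil {
		fmt.Println(err)
	}
}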
// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs). // The parameter" obj" tells this whether to we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g. 
when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. 
this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
533
eks-distro-build-tooling
aws
Go
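The hcl/parser package can also be used directly when the input is known to be native HCL (the top-level hcl.Parse wrapper adds the JSON-vs-HCL detection on top of it). A sketch with an invented snippet, assuming the upstream import path.

package main

import (
	"fmt"
	"log"

	hclparser "github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Parse native HCL directly; lead and line comments are collected into
	// the returned file's Comments slice as described above.
	file, err := hclparser.Parse([]byte(`
# lead comment
name = "eks" # line comment
`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("comment groups collected:", len(file.Comments))
}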
package printer import ( "bytes" "fmt" "sort" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/token" ) const ( blank = byte(' ') newline = byte('\n') tab = byte('\t') infinity = 1 << 30 // offset or line ) var ( unindent = []byte("\uE123") // in the private use space ) type printer struct { cfg Config prev token.Pos comments []*ast.CommentGroup // may be nil, contains all comments standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) enableTrace bool indentTrace int } type ByPosition []*ast.CommentGroup func (b ByPosition) Len() int { return len(b) } func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } // collectComments comments all standalone comments which are not lead or line // comment func (p *printer) collectComments(node ast.Node) { // first collect all comments. This is already stored in // ast.File.(comments) ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { switch t := nn.(type) { case *ast.File: p.comments = t.Comments return nn, false } return nn, true }) standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) for _, c := range p.comments { standaloneComments[c.Pos()] = c } // next remove all lead and line comments from the overall comment map. // This will give us comments which are standalone, comments which are not // assigned to any kind of node. ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { switch t := nn.(type) { case *ast.LiteralType: if t.LeadComment != nil { for _, comment := range t.LeadComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } if t.LineComment != nil { for _, comment := range t.LineComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } case *ast.ObjectItem: if t.LeadComment != nil { for _, comment := range t.LeadComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } if t.LineComment != nil { for _, comment := range t.LineComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } } return nn, true }) for _, c := range standaloneComments { p.standaloneComments = append(p.standaloneComments, c) } sort.Sort(ByPosition(p.standaloneComments)) } // output prints creates b printable HCL output and returns it. func (p *printer) output(n interface{}) []byte { var buf bytes.Buffer switch t := n.(type) { case *ast.File: // File doesn't trace so we add the tracing here defer un(trace(p, "File")) return p.output(t.Node) case *ast.ObjectList: defer un(trace(p, "ObjectList")) var index int for { // Determine the location of the next actual non-comment // item. If we're at the end, the next item is at "infinity" var nextItem token.Pos if index != len(t.Items) { nextItem = t.Items[index].Pos() } else { nextItem = token.Pos{Offset: infinity, Line: infinity} } // Go through the standalone comments in the file and print out // the comments that we should be for this object item. for _, c := range p.standaloneComments { // Go through all the comments in the group. The group // should be printed together, not separated by double newlines. printed := false newlinePrinted := false for _, comment := range c.List { // We only care about comments after the previous item // we've printed so that comments are printed in the // correct locations (between two objects for example). 
// And before the next item. if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // if we hit the end add newlines so we can print the comment // we don't do this if prev is invalid which means the // beginning of the file since the first comment should // be at the first line. if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { buf.Write([]byte{newline, newline}) newlinePrinted = true } // Write the actual comment. buf.WriteString(comment.Text) buf.WriteByte(newline) // Set printed to true to note that we printed something printed = true } } // If we're not at the last item, write a new line so // that there is a newline separating this comment from // the next object. if printed && index != len(t.Items) { buf.WriteByte(newline) } } if index == len(t.Items) { break } buf.Write(p.output(t.Items[index])) if index != len(t.Items)-1 { // Always write a newline to separate us from the next item buf.WriteByte(newline) // Need to determine if we're going to separate the next item // with a blank line. The logic here is simple, though there // are a few conditions: // // 1. The next object is more than one line away anyways, // so we need an empty line. // // 2. The next object is not a "single line" object, so // we need an empty line. // // 3. This current object is not a single line object, // so we need an empty line. current := t.Items[index] next := t.Items[index+1] if next.Pos().Line != t.Items[index].Pos().Line+1 || !p.isSingleLineObject(next) || !p.isSingleLineObject(current) { buf.WriteByte(newline) } } index++ } case *ast.ObjectKey: buf.WriteString(t.Token.Text) case *ast.ObjectItem: p.prev = t.Pos() buf.Write(p.objectItem(t)) case *ast.LiteralType: buf.Write(p.literalType(t)) case *ast.ListType: buf.Write(p.list(t)) case *ast.ObjectType: buf.Write(p.objectType(t)) default: fmt.Printf(" unknown type: %T\n", n) } return buf.Bytes() } func (p *printer) literalType(lit *ast.LiteralType) []byte { result := []byte(lit.Token.Text) switch lit.Token.Type { case token.HEREDOC: // Clear the trailing newline from heredocs if result[len(result)-1] == '\n' { result = result[:len(result)-1] } // Poison lines 2+ so that we don't indent them result = p.heredocIndent(result) case token.STRING: // If this is a multiline string, poison lines 2+ so we don't // indent them. if bytes.IndexRune(result, '\n') >= 0 { result = p.heredocIndent(result) } } return result } // objectItem returns the printable HCL form of an object item. An object type // starts with one/multiple keys and has a value. The value might be of any // type. func (p *printer) objectItem(o *ast.ObjectItem) []byte { defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) var buf bytes.Buffer if o.LeadComment != nil { for _, comment := range o.LeadComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } // If key and val are on different lines, treat line comments like lead comments. 
if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } for i, k := range o.Keys { buf.WriteString(k.Token.Text) buf.WriteByte(blank) // reach end of key if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { buf.WriteString("=") buf.WriteByte(blank) } } buf.Write(p.output(o.Val)) if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { buf.WriteByte(blank) for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) } } return buf.Bytes() } // objectType returns the printable HCL form of an object type. An object type // begins with a brace and ends with a brace. func (p *printer) objectType(o *ast.ObjectType) []byte { defer un(trace(p, "ObjectType")) var buf bytes.Buffer buf.WriteString("{") var index int var nextItem token.Pos var commented, newlinePrinted bool for { // Determine the location of the next actual non-comment // item. If we're at the end, the next item is the closing brace if index != len(o.List.Items) { nextItem = o.List.Items[index].Pos() } else { nextItem = o.Rbrace } // Go through the standalone comments in the file and print out // the comments that we should be for this object item. for _, c := range p.standaloneComments { printed := false var lastCommentPos token.Pos for _, comment := range c.List { // We only care about comments after the previous item // we've printed so that comments are printed in the // correct locations (between two objects for example). // And before the next item. if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // If there are standalone comments and the initial newline has not // been printed yet, do it now. if !newlinePrinted { newlinePrinted = true buf.WriteByte(newline) } // add newline if it's between other printed nodes if index > 0 { commented = true buf.WriteByte(newline) } // Store this position lastCommentPos = comment.Pos() // output the comment itself buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) // Set printed to true to note that we printed something printed = true /* if index != len(o.List.Items) { buf.WriteByte(newline) // do not print on the end } */ } } // Stuff to do if we had comments if printed { // Always write a newline buf.WriteByte(newline) // If there is another item in the object and our comment // didn't hug it directly, then make sure there is a blank // line separating them. if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { buf.WriteByte(newline) } } } if index == len(o.List.Items) { p.prev = o.Rbrace break } // At this point we are sure that it's not a totally empty block: print // the initial newline if it hasn't been printed yet by the previous // block about standalone comments. if !newlinePrinted { buf.WriteByte(newline) newlinePrinted = true } // check if we have adjacent one liner items. If yes we'll going to align // the comments. 
var aligned []*ast.ObjectItem for _, item := range o.List.Items[index:] { // we don't group one line lists if len(o.List.Items) == 1 { break } // one means a oneliner with out any lead comment // two means a oneliner with lead comment // anything else might be something else cur := lines(string(p.objectItem(item))) if cur > 2 { break } curPos := item.Pos() nextPos := token.Pos{} if index != len(o.List.Items)-1 { nextPos = o.List.Items[index+1].Pos() } prevPos := token.Pos{} if index != 0 { prevPos = o.List.Items[index-1].Pos() } // fmt.Println("DEBUG ----------------") // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) if curPos.Line+1 == nextPos.Line { aligned = append(aligned, item) index++ continue } if curPos.Line-1 == prevPos.Line { aligned = append(aligned, item) index++ // finish if we have a new line or comment next. This happens // if the next item is not adjacent if curPos.Line+1 != nextPos.Line { break } continue } break } // put newlines if the items are between other non aligned items. // newlines are also added if there is a standalone comment already, so // check it too if !commented && index != len(aligned) { buf.WriteByte(newline) } if len(aligned) >= 1 { p.prev = aligned[len(aligned)-1].Pos() items := p.alignedItems(aligned) buf.Write(p.indent(items)) } else { p.prev = o.List.Items[index].Pos() buf.Write(p.indent(p.objectItem(o.List.Items[index]))) index++ } buf.WriteByte(newline) } buf.WriteString("}") return buf.Bytes() } func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { var buf bytes.Buffer // find the longest key and value length, needed for alignment var longestKeyLen int // longest key length var longestValLen int // longest value length for _, item := range items { key := len(item.Keys[0].Token.Text) val := len(p.output(item.Val)) if key > longestKeyLen { longestKeyLen = key } if val > longestValLen { longestValLen = val } } for i, item := range items { if item.LeadComment != nil { for _, comment := range item.LeadComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } for i, k := range item.Keys { keyLen := len(k.Token.Text) buf.WriteString(k.Token.Text) for i := 0; i < longestKeyLen-keyLen+1; i++ { buf.WriteByte(blank) } // reach end of key if i == len(item.Keys)-1 && len(item.Keys) == 1 { buf.WriteString("=") buf.WriteByte(blank) } } val := p.output(item.Val) valLen := len(val) buf.Write(val) if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { for i := 0; i < longestValLen-valLen+1; i++ { buf.WriteByte(blank) } for _, comment := range item.LineComment.List { buf.WriteString(comment.Text) } } // do not print for the last item if i != len(items)-1 { buf.WriteByte(newline) } } return buf.Bytes() } // list returns the printable HCL form of an list type. 
func (p *printer) list(l *ast.ListType) []byte { if p.isSingleLineList(l) { return p.singleLineList(l) } var buf bytes.Buffer buf.WriteString("[") buf.WriteByte(newline) var longestLine int for _, item := range l.List { // for now we assume that the list only contains literal types if lit, ok := item.(*ast.LiteralType); ok { lineLen := len(lit.Token.Text) if lineLen > longestLine { longestLine = lineLen } } } haveEmptyLine := false for i, item := range l.List { // If we have a lead comment, then we want to write that first leadComment := false if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { leadComment = true // Ensure an empty line before every element with a // lead comment (except the first item in a list). if !haveEmptyLine && i != 0 { buf.WriteByte(newline) } for _, comment := range lit.LeadComment.List { buf.Write(p.indent([]byte(comment.Text))) buf.WriteByte(newline) } } // also indent each line val := p.output(item) curLen := len(val) buf.Write(p.indent(val)) // if this item is a heredoc, then we output the comma on // the next line. This is the only case this happens. comma := []byte{','} if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { buf.WriteByte(newline) comma = p.indent(comma) } buf.Write(comma) if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { // if the next item doesn't have any comments, do not align buf.WriteByte(blank) // align one space for i := 0; i < longestLine-curLen; i++ { buf.WriteByte(blank) } for _, comment := range lit.LineComment.List { buf.WriteString(comment.Text) } } buf.WriteByte(newline) // Ensure an empty line after every element with a // lead comment (except the first item in a list). haveEmptyLine = leadComment && i != len(l.List)-1 if haveEmptyLine { buf.WriteByte(newline) } } buf.WriteString("]") return buf.Bytes() } // isSingleLineList returns true if: // * they were previously formatted entirely on one line // * they consist entirely of literals // * there are either no heredoc strings or the list has exactly one element // * there are no line comments func (printer) isSingleLineList(l *ast.ListType) bool { for _, item := range l.List { if item.Pos().Line != l.Lbrack.Line { return false } lit, ok := item.(*ast.LiteralType) if !ok { return false } if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { return false } if lit.LineComment != nil { return false } } return true } // singleLineList prints a simple single line list. // For a definition of "simple", see isSingleLineList above. func (p *printer) singleLineList(l *ast.ListType) []byte { buf := &bytes.Buffer{} buf.WriteString("[") for i, item := range l.List { if i != 0 { buf.WriteString(", ") } // Output the item itself buf.Write(p.output(item)) // The heredoc marker needs to be at the end of line. if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { buf.WriteByte(newline) } } buf.WriteString("]") return buf.Bytes() } // indent indents the lines of the given buffer for each non-empty line func (p *printer) indent(buf []byte) []byte { var prefix []byte if p.cfg.SpacesWidth != 0 { for i := 0; i < p.cfg.SpacesWidth; i++ { prefix = append(prefix, blank) } } else { prefix = []byte{tab} } var res []byte bol := true for _, c := range buf { if bol && c != '\n' { res = append(res, prefix...) 
} res = append(res, c) bol = c == '\n' } return res } // unindent removes all the indentation from the tombstoned lines func (p *printer) unindent(buf []byte) []byte { var res []byte for i := 0; i < len(buf); i++ { skip := len(buf)-i <= len(unindent) if !skip { skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) } if skip { res = append(res, buf[i]) continue } // We have a marker. we have to backtrace here and clean out // any whitespace ahead of our tombstone up to a \n for j := len(res) - 1; j >= 0; j-- { if res[j] == '\n' { break } res = res[:j] } // Skip the entire unindent marker i += len(unindent) - 1 } return res } // heredocIndent marks all the 2nd and further lines as unindentable func (p *printer) heredocIndent(buf []byte) []byte { var res []byte bol := false for _, c := range buf { if bol && c != '\n' { res = append(res, unindent...) } res = append(res, c) bol = c == '\n' } return res } // isSingleLineObject tells whether the given object item is a single // line object such as "obj {}". // // A single line object: // // * has no lead comments (hence multi-line) // * has no assignment // * has no values in the stanza (within {}) // func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { // If there is a lead comment, can't be one line if val.LeadComment != nil { return false } // If there is assignment, we always break by line if val.Assign.IsValid() { return false } // If it isn't an object type, then its not a single line object ot, ok := val.Val.(*ast.ObjectType) if !ok { return false } // If the object has no items, it is single line! return len(ot.List.Items) == 0 } func lines(txt string) int { endline := 1 for i := 0; i < len(txt); i++ { if txt[i] == '\n' { endline++ } } return endline } // ---------------------------------------------------------------------------- // Tracing support func (p *printer) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) i := 2 * p.indentTrace for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *printer, msg string) *printer { p.printTrace(msg, "(") p.indentTrace++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *printer) { p.indentTrace-- p.printTrace(")") }
790
eks-distro-build-tooling
aws
Go
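These printer internals are driven through Config.Fprint, defined in the next file. A sketch of printing a parsed node with a non-default indentation width, assuming the upstream import paths; the input is invented.

package main

import (
	"log"
	"os"

	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	node, err := parser.Parse([]byte(`settings { a = 1
b = 22 }`))
	if err != nil {
		log.Fatal(err)
	}

	// Print with 4-space indentation instead of the default; adjacent
	// one-line items inside the block get their assignments aligned.
	cfg := &printer.Config{SpacesWidth: 4}
	if err := cfg.Fprint(os.Stdout, node); err != nil {
		log.Fatal(err)
	}
}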
// Package printer implements printing of AST nodes to HCL format. package printer import ( "bytes" "io" "text/tabwriter" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/parser" ) var DefaultConfig = Config{ SpacesWidth: 2, } // A Config node controls the output of Fprint. type Config struct { SpacesWidth int // if set, it will use spaces instead of tabs for alignment } func (c *Config) Fprint(output io.Writer, node ast.Node) error { p := &printer{ cfg: *c, comments: make([]*ast.CommentGroup, 0), standaloneComments: make([]*ast.CommentGroup, 0), // enableTrace: true, } p.collectComments(node) if _, err := output.Write(p.unindent(p.output(node))); err != nil { return err } // flush tabwriter, if any var err error if tw, _ := output.(*tabwriter.Writer); tw != nil { err = tw.Flush() } return err } // Fprint "pretty-prints" an HCL node to output // It calls Config.Fprint with default settings. func Fprint(output io.Writer, node ast.Node) error { return DefaultConfig.Fprint(output, node) } // Format formats src HCL and returns the result. func Format(src []byte) ([]byte, error) { node, err := parser.Parse(src) if err != nil { return nil, err } var buf bytes.Buffer if err := DefaultConfig.Fprint(&buf, node); err != nil { return nil, err } // Add trailing newline to result buf.WriteString("\n") return buf.Bytes(), nil }
67
eks-distro-build-tooling
aws
Go
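Format is the simplest entry point: parse, pretty-print with DefaultConfig, and append a trailing newline. A sketch with a deliberately messy, invented input, assuming the upstream import path.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	// Format parses the source, pretty-prints it with DefaultConfig and
	// appends a trailing newline.
	messy := []byte(`name="eks-distro"
  replicas =   3`)

	out, err := printer.Format(messy)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}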
// Package scanner implements a scanner for HCL (HashiCorp Configuration // Language) source text. package scanner import ( "bytes" "fmt" "os" "regexp" "unicode" "unicode/utf8" "github.com/hashicorp/hcl/hcl/token" ) // eof represents a marker rune for the end of the reader. const eof = rune(0) // Scanner defines a lexical scanner type Scanner struct { buf *bytes.Buffer // Source buffer for advancing and scanning src []byte // Source buffer for immutable access // Source Position srcPos token.Pos // current position prevPos token.Pos // previous position, used for peek() method lastCharLen int // length of last character in bytes lastLineLen int // length of last line in characters (for correct column reporting) tokStart int // token text start position tokEnd int // token text end position // Error is called for each error encountered. If no Error // function is set, the error is reported to os.Stderr. Error func(pos token.Pos, msg string) // ErrorCount is incremented by one for each error encountered. ErrorCount int // tokPos is the start position of most recently scanned token; set by // Scan. The Filename field is always left untouched by the Scanner. If // an error is reported (via Error) and Position is invalid, the scanner is // not inside a token. tokPos token.Pos } // New creates and initializes a new instance of Scanner using src as // its source content. func New(src []byte) *Scanner { // even though we accept a src, we read from a io.Reader compatible type // (*bytes.Buffer). So in the future we might easily change it to streaming // read. b := bytes.NewBuffer(src) s := &Scanner{ buf: b, src: src, } // srcPosition always starts with 1 s.srcPos.Line = 1 return s } // next reads the next rune from the bufferred reader. Returns the rune(0) if // an error occurs (or io.EOF is returned). func (s *Scanner) next() rune { ch, size, err := s.buf.ReadRune() if err != nil { // advance for error reporting s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size return eof } // remember last position s.prevPos = s.srcPos s.srcPos.Column++ s.lastCharLen = size s.srcPos.Offset += size if ch == utf8.RuneError && size == 1 { s.err("illegal UTF-8 encoding") return ch } if ch == '\n' { s.srcPos.Line++ s.lastLineLen = s.srcPos.Column s.srcPos.Column = 0 } if ch == '\x00' { s.err("unexpected null character (0x00)") return eof } if ch == '\uE123' { s.err("unicode code point U+E123 reserved for internal use") return utf8.RuneError } // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch } // unread unreads the previous read Rune and updates the source position func (s *Scanner) unread() { if err := s.buf.UnreadRune(); err != nil { panic(err) // this is user fault, we should catch it } s.srcPos = s.prevPos // put back last position } // peek returns the next rune without advancing the reader. func (s *Scanner) peek() rune { peek, _, err := s.buf.ReadRune() if err != nil { return eof } s.buf.UnreadRune() return peek } // Scan scans the next token and returns the token. 
func (s *Scanner) Scan() token.Token { ch := s.next() // skip white space for isWhitespace(ch) { ch = s.next() } var tok token.Type // token text markings s.tokStart = s.srcPos.Offset - s.lastCharLen // token position, initial next() is moving the offset by one(size of rune // actually), though we are interested with the starting point s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen if s.srcPos.Column > 0 { // common case: last character was not a '\n' s.tokPos.Line = s.srcPos.Line s.tokPos.Column = s.srcPos.Column } else { // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) s.tokPos.Line = s.srcPos.Line - 1 s.tokPos.Column = s.lastLineLen } switch { case isLetter(ch): tok = token.IDENT lit := s.scanIdentifier() if lit == "true" || lit == "false" { tok = token.BOOL } case isDecimal(ch): tok = s.scanNumber(ch) default: switch ch { case eof: tok = token.EOF case '"': tok = token.STRING s.scanString() case '#', '/': tok = token.COMMENT s.scanComment(ch) case '.': tok = token.PERIOD ch = s.peek() if isDecimal(ch) { tok = token.FLOAT ch = s.scanMantissa(ch) ch = s.scanExponent(ch) } case '<': tok = token.HEREDOC s.scanHeredoc() case '[': tok = token.LBRACK case ']': tok = token.RBRACK case '{': tok = token.LBRACE case '}': tok = token.RBRACE case ',': tok = token.COMMA case '=': tok = token.ASSIGN case '+': tok = token.ADD case '-': if isDecimal(s.peek()) { ch := s.next() tok = s.scanNumber(ch) } else { tok = token.SUB } default: s.err("illegal char") } } // finish token ending s.tokEnd = s.srcPos.Offset // create token literal var tokenText string if s.tokStart >= 0 { tokenText = string(s.src[s.tokStart:s.tokEnd]) } s.tokStart = s.tokEnd // ensure idempotency of tokenText() call return token.Token{ Type: tok, Pos: s.tokPos, Text: tokenText, } } func (s *Scanner) scanComment(ch rune) { // single line comments if ch == '#' || (ch == '/' && s.peek() != '*') { if ch == '/' && s.peek() != '/' { s.err("expected '/' for comment") return } ch = s.next() for ch != '\n' && ch >= 0 && ch != eof { ch = s.next() } if ch != eof && ch >= 0 { s.unread() } return } // be sure we get the character after /* This allows us to find comment's // that are not erminated if ch == '/' { s.next() ch = s.next() // read character after "/*" } // look for /* - style comments for { if ch < 0 || ch == eof { s.err("comment not terminated") break } ch0 := ch ch = s.next() if ch0 == '*' && ch == '/' { break } } } // scanNumber scans a HCL number definition starting with the given rune func (s *Scanner) scanNumber(ch rune) token.Type { if ch == '0' { // check for hexadecimal, octal or float ch = s.next() if ch == 'x' || ch == 'X' { // hexadecimal ch = s.next() found := false for isHexadecimal(ch) { ch = s.next() found = true } if !found { s.err("illegal hexadecimal number") } if ch != eof { s.unread() } return token.NUMBER } // now it's either something like: 0421(octal) or 0.1231(float) illegalOctal := false for isDecimal(ch) { ch = s.next() if ch == '8' || ch == '9' { // this is just a possibility. For example 0159 is illegal, but // 0159.23 is valid. So we mark a possible illegal octal. If // the next character is not a period, we'll print the error. illegalOctal = true } } if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' 
{ ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if illegalOctal { s.err("illegal octal number") } if ch != eof { s.unread() } return token.NUMBER } s.scanMantissa(ch) ch = s.next() // seek forward if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' { ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if ch != eof { s.unread() } return token.NUMBER } // scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false for isDecimal(ch) { ch = s.next() scanned = true } if scanned && ch != eof { s.unread() } return ch } // scanFraction scans the fraction after the '.' rune func (s *Scanner) scanFraction(ch rune) rune { if ch == '.' { ch = s.peek() // we peek just to see if we can move forward ch = s.scanMantissa(ch) } return ch } // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' // rune. func (s *Scanner) scanExponent(ch rune) rune { if ch == 'e' || ch == 'E' { ch = s.next() if ch == '-' || ch == '+' { ch = s.next() } ch = s.scanMantissa(ch) } return ch } // scanHeredoc scans a heredoc string func (s *Scanner) scanHeredoc() { // Scan the second '<' in example: '<<EOF' if s.next() != '<' { s.err("heredoc expected second '<', didn't see it") return } // Get the original offset so we can read just the heredoc ident offs := s.srcPos.Offset // Scan the identifier ch := s.next() // Indented heredoc syntax if ch == '-' { ch = s.next() } for isLetter(ch) || isDigit(ch) { ch = s.next() } // If we reached an EOF then that is not good if ch == eof { s.err("heredoc not terminated") return } // Ignore the '\r' in Windows line endings if ch == '\r' { if s.peek() == '\n' { ch = s.next() } } // If we didn't reach a newline then that is also not good if ch != '\n' { s.err("invalid characters in heredoc anchor") return } // Read the identifier identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen] if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') { s.err("zero-length heredoc anchor") return } var identRegexp *regexp.Regexp if identBytes[0] == '-' { identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:])) } else { identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes)) } // Read the actual string value lineStart := s.srcPos.Offset for { ch := s.next() // Special newline handling. if ch == '\n' { // Math is fast, so we first compare the byte counts to see if we have a chance // of seeing the same identifier - if the length is less than the number of bytes // in the identifier, this cannot be a valid terminator. 
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { break } // Not an anchor match, record the start of a new line lineStart = s.srcPos.Offset } if ch == eof { s.err("heredoc not terminated") return } } return } // scanString scans a quoted string func (s *Scanner) scanString() { braces := 0 for { // '"' opening already consumed // read character after quote ch := s.next() if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { s.err("literal not terminated") return } if ch == '"' && braces == 0 { break } // If we're going into a ${} then we can ignore quotes for awhile if braces == 0 && ch == '$' && s.peek() == '{' { braces++ s.next() } else if braces > 0 && ch == '{' { braces++ } if braces > 0 && ch == '}' { braces-- } if ch == '\\' { s.scanEscape() } } return } // scanEscape scans an escape sequence func (s *Scanner) scanEscape() rune { // http://en.cppreference.com/w/cpp/language/escape ch := s.next() // read character after '/' switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': // nothing to do case '0', '1', '2', '3', '4', '5', '6', '7': // octal notation ch = s.scanDigits(ch, 8, 3) case 'x': // hexademical notation ch = s.scanDigits(s.next(), 16, 2) case 'u': // universal character name ch = s.scanDigits(s.next(), 16, 4) case 'U': // universal character name ch = s.scanDigits(s.next(), 16, 8) default: s.err("illegal char escape") } return ch } // scanDigits scans a rune with the given base for n times. For example an // octal notation \184 would yield in scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { start := n for n > 0 && digitVal(ch) < base { ch = s.next() if ch == eof { // If we see an EOF, we halt any more scanning of digits // immediately. break } n-- } if n > 0 { s.err("illegal char escape") } if n != start && ch != eof { // we scanned all digits, put the last non digit char back, // only if we read anything at all s.unread() } return ch } // scanIdentifier scans an identifier and returns the literal string func (s *Scanner) scanIdentifier() string { offs := s.srcPos.Offset - s.lastCharLen ch := s.next() for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { ch = s.next() } if ch != eof { s.unread() // we got identifier, put back latest char } return string(s.src[offs:s.srcPos.Offset]) } // recentPosition returns the position of the character immediately after the // character or token returned by the last call to Scan. func (s *Scanner) recentPosition() (pos token.Pos) { pos.Offset = s.srcPos.Offset - s.lastCharLen switch { case s.srcPos.Column > 0: // common case: last character was not a '\n' pos.Line = s.srcPos.Line pos.Column = s.srcPos.Column case s.lastLineLen > 0: // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) pos.Line = s.srcPos.Line - 1 pos.Column = s.lastLineLen default: // at the beginning of the source pos.Line = 1 pos.Column = 1 } return } // err prints the error of any scanning to s.Error function. 
If the function is // not defined, by default it prints them to os.Stderr func (s *Scanner) err(msg string) { s.ErrorCount++ pos := s.recentPosition() if s.Error != nil { s.Error(pos, msg) return } fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) } // isLetter returns true if the given rune is a letter func isLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) } // isDigit returns true if the given rune is a decimal digit func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } // isDecimal returns true if the given rune is a decimal digit func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } // isHexadecimal returns true if the given rune is a hexadecimal digit func isHexadecimal(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' } // isWhitespace returns true if the rune is a space, tab, newline or carriage return func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } // digitVal returns the integer value of a given octal, decimal, or hexadecimal rune func digitVal(ch rune) int { switch { case '0' <= ch && ch <= '9': return int(ch - '0') case 'a' <= ch && ch <= 'f': return int(ch - 'a' + 10) case 'A' <= ch && ch <= 'F': return int(ch - 'A' + 10) } return 16 // larger than any legal digit val }
653
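Illustrative usage (not part of the vendored file above): a minimal sketch of driving the HCL scanner in a token loop, assuming the upstream import paths github.com/hashicorp/hcl/hcl/scanner and github.com/hashicorp/hcl/hcl/token as vendored here; the sample source text is arbitrary.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	src := []byte(`region = "us-east-1" # provider region`)
	s := scanner.New(src)
	// Scan until the scanner reports EOF; each token carries its type,
	// position, and literal text.
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		fmt.Printf("%s\t%s\t%q\n", tok.Pos, tok.Type, tok.Text)
	}
}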
eks-distro-build-tooling
aws
Go
package strconv import ( "errors" "unicode/utf8" ) // ErrSyntax indicates that a value does not have the right syntax for the target type. var ErrSyntax = errors.New("invalid syntax") // Unquote interprets s as a single-quoted, double-quoted, // or backquoted Go string literal, returning the string value // that s quotes. (If s is single-quoted, it would be a Go // character literal; Unquote returns the corresponding // one-character string.) func Unquote(s string) (t string, err error) { n := len(s) if n < 2 { return "", ErrSyntax } quote := s[0] if quote != s[n-1] { return "", ErrSyntax } s = s[1 : n-1] if quote != '"' { return "", ErrSyntax } if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { return "", ErrSyntax } // Is it trivial? Avoid allocation. if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { switch quote { case '"': return s, nil case '\'': r, size := utf8.DecodeRuneInString(s) if size == len(s) && (r != utf8.RuneError || size != 1) { return s, nil } } } var runeTmp [utf8.UTFMax]byte buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. for len(s) > 0 { // If we're starting a '${}' then let it through un-unquoted. // Specifically: we don't unquote any characters within the `${}` // section. if s[0] == '$' && len(s) > 1 && s[1] == '{' { buf = append(buf, '$', '{') s = s[2:] // Continue reading until we find the closing brace, copying as-is braces := 1 for len(s) > 0 && braces > 0 { r, size := utf8.DecodeRuneInString(s) if r == utf8.RuneError { return "", ErrSyntax } s = s[size:] n := utf8.EncodeRune(runeTmp[:], r) buf = append(buf, runeTmp[:n]...) switch r { case '{': braces++ case '}': braces-- } } if braces != 0 { return "", ErrSyntax } if len(s) == 0 { // If there's no string left, we're done! break } else { // If there's more left, we need to pop back up to the top of the loop // in case there's another interpolation in this string. continue } } if s[0] == '\n' { return "", ErrSyntax } c, multibyte, ss, err := unquoteChar(s, quote) if err != nil { return "", err } s = ss if c < utf8.RuneSelf || !multibyte { buf = append(buf, byte(c)) } else { n := utf8.EncodeRune(runeTmp[:], c) buf = append(buf, runeTmp[:n]...) } if quote == '\'' && len(s) != 0 { // single-quoted must be single character return "", ErrSyntax } } return string(buf), nil } // contains reports whether the string contains the byte c. 
func contains(s string, c byte) bool { for i := 0; i < len(s); i++ { if s[i] == c { return true } } return false } func unhex(b byte) (v rune, ok bool) { c := rune(b) switch { case '0' <= c && c <= '9': return c - '0', true case 'a' <= c && c <= 'f': return c - 'a' + 10, true case 'A' <= c && c <= 'F': return c - 'A' + 10, true } return } func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { // easy cases switch c := s[0]; { case c == quote && (quote == '\'' || quote == '"'): err = ErrSyntax return case c >= utf8.RuneSelf: r, size := utf8.DecodeRuneInString(s) return r, true, s[size:], nil case c != '\\': return rune(s[0]), false, s[1:], nil } // hard case: c is backslash if len(s) <= 1 { err = ErrSyntax return } c := s[1] s = s[2:] switch c { case 'a': value = '\a' case 'b': value = '\b' case 'f': value = '\f' case 'n': value = '\n' case 'r': value = '\r' case 't': value = '\t' case 'v': value = '\v' case 'x', 'u', 'U': n := 0 switch c { case 'x': n = 2 case 'u': n = 4 case 'U': n = 8 } var v rune if len(s) < n { err = ErrSyntax return } for j := 0; j < n; j++ { x, ok := unhex(s[j]) if !ok { err = ErrSyntax return } v = v<<4 | x } s = s[n:] if c == 'x' { // single-byte string, possibly not UTF-8 value = v break } if v > utf8.MaxRune { err = ErrSyntax return } value = v multibyte = true case '0', '1', '2', '3', '4', '5', '6', '7': v := rune(c) - '0' if len(s) < 2 { err = ErrSyntax return } for j := 0; j < 2; j++ { // one digit already; two more x := rune(s[j]) - '0' if x < 0 || x > 7 { err = ErrSyntax return } v = (v << 3) | x } s = s[2:] if v > 255 { err = ErrSyntax return } value = v case '\\': value = '\\' case '\'', '"': if c != quote { err = ErrSyntax return } value = rune(c) default: err = ErrSyntax return } tail = s return }
242
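Illustrative usage (not part of the vendored file above): a small sketch of Unquote showing that ordinary escape sequences are decoded while a ${...} interpolation section is copied through untouched; the hclstrconv alias mirrors how the hcl/token package consumes this package.

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// The \n escape is interpreted, but the ${var.name} section is kept
	// as-is so HCL's interpolation handling can process it later.
	s, err := hclstrconv.Unquote(`"prefix-${var.name}\n"`)
	if err != nil {
		fmt.Println("unquote error:", err)
		return
	}
	fmt.Printf("%q\n", s) // "prefix-${var.name}\n"
}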
eks-distro-build-tooling
aws
Go
package token import "fmt" // Pos describes an arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. type Pos struct { Filename string // filename, if any Offset int // offset, starting at 0 Line int // line number, starting at 1 Column int // column number, starting at 1 (character count) } // IsValid returns true if the position is valid. func (p *Pos) IsValid() bool { return p.Line > 0 } // String returns a string in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name // file invalid position with file name // - invalid position without file name func (p Pos) String() string { s := p.Filename if p.IsValid() { if s != "" { s += ":" } s += fmt.Sprintf("%d:%d", p.Line, p.Column) } if s == "" { s = "-" } return s } // Before reports whether the position p is before u. func (p Pos) Before(u Pos) bool { return u.Offset > p.Offset || u.Line > p.Line } // After reports whether the position p is after u. func (p Pos) After(u Pos) bool { return u.Offset < p.Offset || u.Line < p.Line }
47
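Illustrative usage (not part of the vendored file above): the documented String forms of Pos, assuming the hcl/hcl/token import path.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	p := token.Pos{Filename: "main.hcl", Line: 3, Column: 7}
	fmt.Println(p.String())           // main.hcl:3:7
	fmt.Println(token.Pos{}.String()) // "-" for an invalid position without a file name
}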
eks-distro-build-tooling
aws
Go
// Package token defines constants representing the lexical tokens for HCL // (HashiCorp Configuration Language) package token import ( "fmt" "strconv" "strings" hclstrconv "github.com/hashicorp/hcl/hcl/strconv" ) // Token defines a single HCL token which can be obtained via the Scanner type Token struct { Type Type Pos Pos Text string JSON bool } // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) type Type int const ( // Special tokens ILLEGAL Type = iota EOF COMMENT identifier_beg IDENT // literals literal_beg NUMBER // 12345 FLOAT // 123.45 BOOL // true,false STRING // "abc" HEREDOC // <<FOO\nbar\nFOO literal_end identifier_end operator_beg LBRACK // [ LBRACE // { COMMA // , PERIOD // . RBRACK // ] RBRACE // } ASSIGN // = ADD // + SUB // - operator_end ) var tokens = [...]string{ ILLEGAL: "ILLEGAL", EOF: "EOF", COMMENT: "COMMENT", IDENT: "IDENT", NUMBER: "NUMBER", FLOAT: "FLOAT", BOOL: "BOOL", STRING: "STRING", LBRACK: "LBRACK", LBRACE: "LBRACE", COMMA: "COMMA", PERIOD: "PERIOD", HEREDOC: "HEREDOC", RBRACK: "RBRACK", RBRACE: "RBRACE", ASSIGN: "ASSIGN", ADD: "ADD", SUB: "SUB", } // String returns the string corresponding to the token tok. func (t Type) String() string { s := "" if 0 <= t && t < Type(len(tokens)) { s = tokens[t] } if s == "" { s = "token(" + strconv.Itoa(int(t)) + ")" } return s } // IsIdentifier returns true for tokens corresponding to identifiers and basic // type literals; it returns false otherwise. func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } // IsLiteral returns true for tokens corresponding to basic type literals; it // returns false otherwise. func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } // IsOperator returns true for tokens corresponding to operators and // delimiters; it returns false otherwise. func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } // String returns the token's literal text. Note that this is only // applicable for certain token types, such as token.IDENT, // token.STRING, etc.. func (t Token) String() string { return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) } // Value returns the properly typed value for this token. The type of // the returned interface{} is guaranteed based on the Type field. // // This can only be called for literal types. If it is called for any other // type, this will panic. func (t Token) Value() interface{} { switch t.Type { case BOOL: if t.Text == "true" { return true } else if t.Text == "false" { return false } panic("unknown bool value: " + t.Text) case FLOAT: v, err := strconv.ParseFloat(t.Text, 64) if err != nil { panic(err) } return float64(v) case NUMBER: v, err := strconv.ParseInt(t.Text, 0, 64) if err != nil { panic(err) } return int64(v) case IDENT: return t.Text case HEREDOC: return unindentHeredoc(t.Text) case STRING: // Determine the Unquote method to use. If it came from JSON, // then we need to use the built-in unquote since we have to // escape interpolations there. 
f := hclstrconv.Unquote if t.JSON { f = strconv.Unquote } // This case occurs if json null is used if t.Text == "" { return "" } v, err := f(t.Text) if err != nil { panic(fmt.Sprintf("unquote %s err: %s", t.Text, err)) } return v default: panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type)) } } // unindentHeredoc returns the string content of a HEREDOC if it is started with << // and the content of a HEREDOC with the hanging indent removed if it is started with // a <<-, and the terminating line is at least as indented as the least indented line. func unindentHeredoc(heredoc string) string { // We need to find the end of the marker idx := strings.IndexByte(heredoc, '\n') if idx == -1 { panic("heredoc doesn't contain newline") } unindent := heredoc[2] == '-' // We can optimize if the heredoc isn't marked for indentation if !unindent { return string(heredoc[idx+1 : len(heredoc)-idx+1]) } // We need to unindent each line based on the indentation level of the marker lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n") whitespacePrefix := lines[len(lines)-1] isIndented := true for _, v := range lines { if strings.HasPrefix(v, whitespacePrefix) { continue } isIndented = false break } // If all lines are not at least as indented as the terminating mark, return the // heredoc as is, but trim the leading space from the marker on the final line. if !isIndented { return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t") } unindentedLines := make([]string, len(lines)) for k, v := range lines { if k == len(lines)-1 { unindentedLines[k] = "" break } unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix) } return strings.Join(unindentedLines, "\n") }
220
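Illustrative usage (not part of the vendored file above): Token.Value returning properly typed Go values for literal token types. The token literals here are hand-built for the example rather than produced by the scanner.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	toks := []token.Token{
		{Type: token.NUMBER, Text: "42"},      // -> int64
		{Type: token.FLOAT, Text: "3.14"},     // -> float64
		{Type: token.BOOL, Text: "true"},      // -> bool
		{Type: token.STRING, Text: `"hello"`}, // -> string (unquoted)
	}
	for _, t := range toks {
		v := t.Value()
		fmt.Printf("%-6s -> %T %v\n", t.Type, v, v)
	}
}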
eks-distro-build-tooling
aws
Go
package parser import "github.com/hashicorp/hcl/hcl/ast" // flattenObjects takes an AST node, walks it, and flattens func flattenObjects(node ast.Node) { ast.Walk(node, func(n ast.Node) (ast.Node, bool) { // We only care about lists, because this is what we modify list, ok := n.(*ast.ObjectList) if !ok { return n, true } // Rebuild the item list items := make([]*ast.ObjectItem, 0, len(list.Items)) frontier := make([]*ast.ObjectItem, len(list.Items)) copy(frontier, list.Items) for len(frontier) > 0 { // Pop the current item n := len(frontier) item := frontier[n-1] frontier = frontier[:n-1] switch v := item.Val.(type) { case *ast.ObjectType: items, frontier = flattenObjectType(v, item, items, frontier) case *ast.ListType: items, frontier = flattenListType(v, item, items, frontier) default: items = append(items, item) } } // Reverse the list since the frontier model runs things backwards for i := len(items)/2 - 1; i >= 0; i-- { opp := len(items) - 1 - i items[i], items[opp] = items[opp], items[i] } // Done! Set the original items list.Items = items return n, true }) } func flattenListType( ot *ast.ListType, item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { // If the list is empty, keep the original list if len(ot.List) == 0 { items = append(items, item) return items, frontier } // All the elements of this object must also be objects! for _, subitem := range ot.List { if _, ok := subitem.(*ast.ObjectType); !ok { items = append(items, item) return items, frontier } } // Great! We have a match go through all the items and flatten for _, elem := range ot.List { // Add it to the frontier so that we can recurse frontier = append(frontier, &ast.ObjectItem{ Keys: item.Keys, Assign: item.Assign, Val: elem, LeadComment: item.LeadComment, LineComment: item.LineComment, }) } return items, frontier } func flattenObjectType( ot *ast.ObjectType, item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { // If the list has no items we do not have to flatten anything if ot.List.Items == nil { items = append(items, item) return items, frontier } // All the elements of this object must also be objects! for _, subitem := range ot.List.Items { if _, ok := subitem.Val.(*ast.ObjectType); !ok { items = append(items, item) return items, frontier } } // Great! We have a match go through all the items and flatten for _, subitem := range ot.List.Items { // Copy the new key keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) copy(keys, item.Keys) copy(keys[len(item.Keys):], subitem.Keys) // Add it to the frontier so that we can recurse frontier = append(frontier, &ast.ObjectItem{ Keys: keys, Assign: item.Assign, Val: subitem.Val, LeadComment: item.LeadComment, LineComment: item.LineComment, }) } return items, frontier }
118
eks-distro-build-tooling
aws
Go
package parser import ( "errors" "fmt" "github.com/hashicorp/hcl/hcl/ast" hcltoken "github.com/hashicorp/hcl/hcl/token" "github.com/hashicorp/hcl/json/scanner" "github.com/hashicorp/hcl/json/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = fmt.Errorf("%s: %s", pos, msg) } // The root must be an object in JSON object, err := p.object() if scerr != nil { return nil, scerr } if err != nil { return nil, err } // We make our final node an object list so it is more HCL compatible f.Node = object.List // Flatten it, which finds patterns and turns them into more HCL-like // AST trees. flattenObjects(f.Node) return f, nil } func (p *Parser) objectList() (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // Check for a followup comma. If it isn't a comma, then we're done if tok := p.scan(); tok.Type != token.COMMA { break } } return node, nil } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } switch p.tok.Type { case token.COLON: pos := p.tok.Pos o.Assign = hcltoken.Pos{ Filename: pos.Filename, Offset: pos.Offset, Line: pos.Line, Column: pos.Column, } o.Val, err = p.objectValue() if err != nil { return nil, err } } return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: return nil, errEofToken case token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{ Token: p.tok.HCLToken(), }) case token.COLON: // If we have a zero keycount it means that we never got // an object key, i.e. `{ :`. This is a syntax error. if keyCount == 0 { return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } // Done return keys, nil case token.ILLEGAL: return nil, errors.New("illegal") default: return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) objectValue() (ast.Node, error) { defer un(trace(p, "ParseObjectValue")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.EOF: return nil, errEofToken } return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) } // object parses any type of object, such as number, bool, string, object or // list. 
func (p *Parser) object() (*ast.ObjectType, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.LBRACE: return p.objectType() case token.EOF: return nil, errEofToken } return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{} l, err := p.objectList() // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. if err != nil && p.tok.Type != token.RBRACE { return nil, err } o.List = l return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{} for { tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.STRING: node, err := p.literalType() if err != nil { return nil, err } l.Add(node) case token.COMMA: continue case token.LBRACE: node, err := p.objectType() if err != nil { return nil, err } l.Add(node) case token.BOOL: // TODO(arslan) should we support? not supported by HCL yet case token.LBRACK: // TODO(arslan) should we support nested lists? Even though it's // written in README of HCL, it's not a part of the grammar // (not defined in parse.y) case token.RBRACK: // finished return l, nil default: return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok.HCLToken(), }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } p.tok = p.sc.Scan() return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
314
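Illustrative usage (not part of the vendored file above): parsing a JSON document into the HCL AST through this package's Parse entry point; the field names in the sample input are arbitrary.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := []byte(`{"service": {"name": "api", "port": 8080}}`)
	file, err := jsonparser.Parse(src)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// Parse returns an *ast.File whose Node is the flattened *ast.ObjectList.
	list := file.Node.(*ast.ObjectList)
	fmt.Println("top-level items:", len(list.Items))
}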
eks-distro-build-tooling
aws
Go
package scanner import ( "bytes" "fmt" "os" "unicode" "unicode/utf8" "github.com/hashicorp/hcl/json/token" ) // eof represents a marker rune for the end of the reader. const eof = rune(0) // Scanner defines a lexical scanner type Scanner struct { buf *bytes.Buffer // Source buffer for advancing and scanning src []byte // Source buffer for immutable access // Source Position srcPos token.Pos // current position prevPos token.Pos // previous position, used for peek() method lastCharLen int // length of last character in bytes lastLineLen int // length of last line in characters (for correct column reporting) tokStart int // token text start position tokEnd int // token text end position // Error is called for each error encountered. If no Error // function is set, the error is reported to os.Stderr. Error func(pos token.Pos, msg string) // ErrorCount is incremented by one for each error encountered. ErrorCount int // tokPos is the start position of most recently scanned token; set by // Scan. The Filename field is always left untouched by the Scanner. If // an error is reported (via Error) and Position is invalid, the scanner is // not inside a token. tokPos token.Pos } // New creates and initializes a new instance of Scanner using src as // its source content. func New(src []byte) *Scanner { // even though we accept a src, we read from a io.Reader compatible type // (*bytes.Buffer). So in the future we might easily change it to streaming // read. b := bytes.NewBuffer(src) s := &Scanner{ buf: b, src: src, } // srcPosition always starts with 1 s.srcPos.Line = 1 return s } // next reads the next rune from the bufferred reader. Returns the rune(0) if // an error occurs (or io.EOF is returned). func (s *Scanner) next() rune { ch, size, err := s.buf.ReadRune() if err != nil { // advance for error reporting s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size return eof } if ch == utf8.RuneError && size == 1 { s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size s.err("illegal UTF-8 encoding") return ch } // remember last position s.prevPos = s.srcPos s.srcPos.Column++ s.lastCharLen = size s.srcPos.Offset += size if ch == '\n' { s.srcPos.Line++ s.lastLineLen = s.srcPos.Column s.srcPos.Column = 0 } // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch } // unread unreads the previous read Rune and updates the source position func (s *Scanner) unread() { if err := s.buf.UnreadRune(); err != nil { panic(err) // this is user fault, we should catch it } s.srcPos = s.prevPos // put back last position } // peek returns the next rune without advancing the reader. func (s *Scanner) peek() rune { peek, _, err := s.buf.ReadRune() if err != nil { return eof } s.buf.UnreadRune() return peek } // Scan scans the next token and returns the token. 
func (s *Scanner) Scan() token.Token { ch := s.next() // skip white space for isWhitespace(ch) { ch = s.next() } var tok token.Type // token text markings s.tokStart = s.srcPos.Offset - s.lastCharLen // token position, initial next() is moving the offset by one(size of rune // actually), though we are interested with the starting point s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen if s.srcPos.Column > 0 { // common case: last character was not a '\n' s.tokPos.Line = s.srcPos.Line s.tokPos.Column = s.srcPos.Column } else { // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) s.tokPos.Line = s.srcPos.Line - 1 s.tokPos.Column = s.lastLineLen } switch { case isLetter(ch): lit := s.scanIdentifier() if lit == "true" || lit == "false" { tok = token.BOOL } else if lit == "null" { tok = token.NULL } else { s.err("illegal char") } case isDecimal(ch): tok = s.scanNumber(ch) default: switch ch { case eof: tok = token.EOF case '"': tok = token.STRING s.scanString() case '.': tok = token.PERIOD ch = s.peek() if isDecimal(ch) { tok = token.FLOAT ch = s.scanMantissa(ch) ch = s.scanExponent(ch) } case '[': tok = token.LBRACK case ']': tok = token.RBRACK case '{': tok = token.LBRACE case '}': tok = token.RBRACE case ',': tok = token.COMMA case ':': tok = token.COLON case '-': if isDecimal(s.peek()) { ch := s.next() tok = s.scanNumber(ch) } else { s.err("illegal char") } default: s.err("illegal char: " + string(ch)) } } // finish token ending s.tokEnd = s.srcPos.Offset // create token literal var tokenText string if s.tokStart >= 0 { tokenText = string(s.src[s.tokStart:s.tokEnd]) } s.tokStart = s.tokEnd // ensure idempotency of tokenText() call return token.Token{ Type: tok, Pos: s.tokPos, Text: tokenText, } } // scanNumber scans a HCL number definition starting with the given rune func (s *Scanner) scanNumber(ch rune) token.Type { zero := ch == '0' pos := s.srcPos s.scanMantissa(ch) ch = s.next() // seek forward if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' { ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if ch != eof { s.unread() } // If we have a larger number and this is zero, error if zero && pos != s.srcPos { s.err("numbers cannot start with 0") } return token.NUMBER } // scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false for isDecimal(ch) { ch = s.next() scanned = true } if scanned && ch != eof { s.unread() } return ch } // scanFraction scans the fraction after the '.' rune func (s *Scanner) scanFraction(ch rune) rune { if ch == '.' { ch = s.peek() // we peek just to see if we can move forward ch = s.scanMantissa(ch) } return ch } // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' // rune. 
func (s *Scanner) scanExponent(ch rune) rune { if ch == 'e' || ch == 'E' { ch = s.next() if ch == '-' || ch == '+' { ch = s.next() } ch = s.scanMantissa(ch) } return ch } // scanString scans a quoted string func (s *Scanner) scanString() { braces := 0 for { // '"' opening already consumed // read character after quote ch := s.next() if ch == '\n' || ch < 0 || ch == eof { s.err("literal not terminated") return } if ch == '"' { break } // If we're going into a ${} then we can ignore quotes for awhile if braces == 0 && ch == '$' && s.peek() == '{' { braces++ s.next() } else if braces > 0 && ch == '{' { braces++ } if braces > 0 && ch == '}' { braces-- } if ch == '\\' { s.scanEscape() } } return } // scanEscape scans an escape sequence func (s *Scanner) scanEscape() rune { // http://en.cppreference.com/w/cpp/language/escape ch := s.next() // read character after '/' switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': // nothing to do case '0', '1', '2', '3', '4', '5', '6', '7': // octal notation ch = s.scanDigits(ch, 8, 3) case 'x': // hexademical notation ch = s.scanDigits(s.next(), 16, 2) case 'u': // universal character name ch = s.scanDigits(s.next(), 16, 4) case 'U': // universal character name ch = s.scanDigits(s.next(), 16, 8) default: s.err("illegal char escape") } return ch } // scanDigits scans a rune with the given base for n times. For example an // octal notation \184 would yield in scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { for n > 0 && digitVal(ch) < base { ch = s.next() n-- } if n > 0 { s.err("illegal char escape") } // we scanned all digits, put the last non digit char back s.unread() return ch } // scanIdentifier scans an identifier and returns the literal string func (s *Scanner) scanIdentifier() string { offs := s.srcPos.Offset - s.lastCharLen ch := s.next() for isLetter(ch) || isDigit(ch) || ch == '-' { ch = s.next() } if ch != eof { s.unread() // we got identifier, put back latest char } return string(s.src[offs:s.srcPos.Offset]) } // recentPosition returns the position of the character immediately after the // character or token returned by the last call to Scan. func (s *Scanner) recentPosition() (pos token.Pos) { pos.Offset = s.srcPos.Offset - s.lastCharLen switch { case s.srcPos.Column > 0: // common case: last character was not a '\n' pos.Line = s.srcPos.Line pos.Column = s.srcPos.Column case s.lastLineLen > 0: // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) pos.Line = s.srcPos.Line - 1 pos.Column = s.lastLineLen default: // at the beginning of the source pos.Line = 1 pos.Column = 1 } return } // err prints the error of any scanning to s.Error function. 
If the function is // not defined, by default it prints them to os.Stderr func (s *Scanner) err(msg string) { s.ErrorCount++ pos := s.recentPosition() if s.Error != nil { s.Error(pos, msg) return } fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) } // isLetter returns true if the given rune is a letter func isLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) } // isDigit returns true if the given rune is a decimal digit func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } // isDecimal returns true if the given rune is a decimal digit func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } // isHexadecimal returns true if the given rune is a hexadecimal digit func isHexadecimal(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' } // isWhitespace returns true if the rune is a space, tab, newline or carriage return func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } // digitVal returns the integer value of a given octal, decimal, or hexadecimal rune func digitVal(ch rune) int { switch { case '0' <= ch && ch <= '9': return int(ch - '0') case 'a' <= ch && ch <= 'f': return int(ch - 'a' + 10) case 'A' <= ch && ch <= 'F': return int(ch - 'A' + 10) } return 16 // larger than any legal digit val }
452
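Illustrative usage (not part of the vendored file above): the JSON scanner in the same Scan loop as the HCL one; note that it additionally emits NULL and COLON tokens and rejects numbers with a leading zero.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`{"retries": null, "port": 8080}`))
	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
		fmt.Printf("%-6s %q\n", tok.Type, tok.Text)
	}
}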
eks-distro-build-tooling
aws
Go
package token import "fmt" // Pos describes an arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. type Pos struct { Filename string // filename, if any Offset int // offset, starting at 0 Line int // line number, starting at 1 Column int // column number, starting at 1 (character count) } // IsValid returns true if the position is valid. func (p *Pos) IsValid() bool { return p.Line > 0 } // String returns a string in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name // file invalid position with file name // - invalid position without file name func (p Pos) String() string { s := p.Filename if p.IsValid() { if s != "" { s += ":" } s += fmt.Sprintf("%d:%d", p.Line, p.Column) } if s == "" { s = "-" } return s } // Before reports whether the position p is before u. func (p Pos) Before(u Pos) bool { return u.Offset > p.Offset || u.Line > p.Line } // After reports whether the position p is after u. func (p Pos) After(u Pos) bool { return u.Offset < p.Offset || u.Line < p.Line }
47
eks-distro-build-tooling
aws
Go
package token import ( "fmt" "strconv" hcltoken "github.com/hashicorp/hcl/hcl/token" ) // Token defines a single HCL token which can be obtained via the Scanner type Token struct { Type Type Pos Pos Text string } // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) type Type int const ( // Special tokens ILLEGAL Type = iota EOF identifier_beg literal_beg NUMBER // 12345 FLOAT // 123.45 BOOL // true,false STRING // "abc" NULL // null literal_end identifier_end operator_beg LBRACK // [ LBRACE // { COMMA // , PERIOD // . COLON // : RBRACK // ] RBRACE // } operator_end ) var tokens = [...]string{ ILLEGAL: "ILLEGAL", EOF: "EOF", NUMBER: "NUMBER", FLOAT: "FLOAT", BOOL: "BOOL", STRING: "STRING", NULL: "NULL", LBRACK: "LBRACK", LBRACE: "LBRACE", COMMA: "COMMA", PERIOD: "PERIOD", COLON: "COLON", RBRACK: "RBRACK", RBRACE: "RBRACE", } // String returns the string corresponding to the token tok. func (t Type) String() string { s := "" if 0 <= t && t < Type(len(tokens)) { s = tokens[t] } if s == "" { s = "token(" + strconv.Itoa(int(t)) + ")" } return s } // IsIdentifier returns true for tokens corresponding to identifiers and basic // type literals; it returns false otherwise. func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } // IsLiteral returns true for tokens corresponding to basic type literals; it // returns false otherwise. func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } // IsOperator returns true for tokens corresponding to operators and // delimiters; it returns false otherwise. func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } // String returns the token's literal text. Note that this is only // applicable for certain token types, such as token.IDENT, // token.STRING, etc.. func (t Token) String() string { return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) } // HCLToken converts this token to an HCL token. // // The token type must be a literal type or this will panic. func (t Token) HCLToken() hcltoken.Token { switch t.Type { case BOOL: return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} case FLOAT: return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} case NULL: return hcltoken.Token{Type: hcltoken.STRING, Text: ""} case NUMBER: return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} case STRING: return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} default: panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) } }
119
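Illustrative usage (not part of the vendored file above): converting a JSON token to its HCL counterpart with HCLToken. JSON-sourced strings carry JSON: true so that Token.Value later uses the standard library unquoter rather than the HCL one.

package main

import (
	"fmt"

	jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
	jt := jsontoken.Token{Type: jsontoken.STRING, Text: `"${var.region}"`}
	ht := jt.HCLToken()
	fmt.Println(ht.Type, ht.JSON, ht.Text) // STRING true "${var.region}"
}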
eks-distro-build-tooling
aws
Go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
224
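Illustrative usage (not part of the vendored file above): a minimal sketch of the 2Q cache, assuming the top-level github.com/hashicorp/golang-lru import; the keys and values are arbitrary.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}
	// Insert more entries than the cache holds; older one-hit entries are
	// evicted from the recent list first.
	for i := 0; i < 256; i++ {
		cache.Add(i, fmt.Sprintf("value-%d", i))
	}
	if v, ok := cache.Get(200); ok {
		fmt.Println("hit:", v)
	}
	fmt.Println("len:", cache.Len()) // bounded by the configured size
}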
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). // ARC is an enhancement over the standard LRU cache in that tracks both // frequency and recency of use. This avoids a burst in access to new // entries from evicting the frequently used older entries. It adds some // additional tracking overhead to a standard LRU cache, computationally // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 t1 simplelru.LRUCache // T1 is the LRU for recently accessed items b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size func NewARC(size int) (*ARCCache, error) { // Create the sub LRUs b1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } b2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t1, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } t2, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } // Initialize the ARC c := &ARCCache{ size: size, p: 0, t1: t1, b1: b1, t2: t2, b2: b2, } return c, nil } // Get looks up a key's value from the cache. func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) c.t2.Add(key, val) return val, ok } // Check if the value is contained in T2 (frequent) if val, ok := c.t2.Get(key); ok { return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *ARCCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is contained in T1 (recent), and potentially // promote it to frequent T2 if c.t1.Contains(key) { c.t1.Remove(key) c.t2.Add(key, value) return } // Check if the value is already in T2 (frequent) and update it if c.t2.Contains(key) { c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // recently used list if c.b1.Contains(key) { // T1 set is too small, increase P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b2Len > b1Len { delta = b2Len / b1Len } if c.p+delta >= c.size { c.p = c.size } else { c.p += delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Remove from B1 c.b1.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Check if this value was recently evicted as part of the // frequently used list if c.b2.Contains(key) { // T2 set is too small, decrease P appropriately delta := 1 b1Len := c.b1.Len() b2Len := c.b2.Len() if b1Len > b2Len { delta = b1Len / b2Len } if delta >= c.p { c.p = 0 } else { c.p -= delta } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(true) } // Remove from B2 c.b2.Remove(key) // Add the key to the frequently used list c.t2.Add(key, value) return } // Potentially need to make room in the cache if c.t1.Len()+c.t2.Len() >= c.size { c.replace(false) } // Keep the size of the ghost buffers trim if c.b1.Len() > c.size-c.p { c.b1.RemoveOldest() } if c.b2.Len() > c.p { c.b2.RemoveOldest() } // Add to the recently seen list c.t1.Add(key, value) return } // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P func (c *ARCCache) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { c.b1.Add(k, nil) } } else { k, _, ok := c.t2.RemoveOldest() if ok { c.b2.Add(k, nil) } } } // Len returns the number of cached entries func (c *ARCCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys func (c *ARCCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() k2 := c.t2.Keys() return append(k1, k2...) } // Remove is used to purge a key from the cache func (c *ARCCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { return } if c.t2.Remove(key) { return } if c.b1.Remove(key) { return } if c.b2.Remove(key) { return } } // Purge is used to clear the cache func (c *ARCCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() c.t2.Purge() c.b1.Purge() c.b2.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *ARCCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { return val, ok } return c.t2.Peek(key) }
258
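Illustrative usage (not part of the vendored file above): the ARC cache with Peek versus Get, the former inspecting a key without promoting it between the recent and frequent lists.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	arc, err := lru.NewARC(64)
	if err != nil {
		panic(err)
	}
	arc.Add("config", "v1")
	if v, ok := arc.Peek("config"); ok {
		fmt.Println("peeked:", v) // no recency/frequency update
	}
	if v, ok := arc.Get("config"); ok {
		fmt.Println("got:", v) // promotes the entry to the frequent list
	}
}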
eks-distro-build-tooling
aws
Go
// Package lru provides three different LRU caches of varying sophistication. // // Cache is a simple LRU cache. It is based on the // LRU implementation in groupcache: // https://github.com/golang/groupcache/tree/master/lru // // TwoQueueCache tracks frequently used and recently used entries separately. // This avoids a burst of accesses from taking out frequently used entries, // at the cost of about 2x computational overhead and some extra bookkeeping. // // ARCCache is an adaptive replacement cache. It tracks recent evictions as // well as recent usage in both the frequent and recent caches. Its // computational overhead is comparable to TwoQueueCache, but the memory // overhead is linear with the size of the cache. // // ARC has been patented by IBM, so do not use it if that is problematic for // your program. // // All caches in this package take locks while operating, and are therefore // thread-safe for consumers. package lru
22
eks-distro-build-tooling
aws
Go
package lru import ( "sync" "github.com/hashicorp/golang-lru/simplelru" ) // Cache is a thread-safe fixed size LRU cache. type Cache struct { lru simplelru.LRUCache lock sync.RWMutex } // New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) if err != nil { return nil, err } c := &Cache{ lru: lru, } return c, nil } // Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() c.lock.Unlock() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) c.lock.Unlock() return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false } evicted = c.lru.Add(key, value) return false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) { c.lock.Lock() c.lru.Remove(key) c.lock.Unlock() } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { c.lock.Lock() c.lru.RemoveOldest() c.lock.Unlock() } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() return length }
117
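Illustrative usage (not part of the vendored file above): the basic thread-safe Cache with an eviction callback; the tiny size of 2 is only there to force an eviction.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	onEvict := func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}
	cache, err := lru.NewWithEvict(2, onEvict)
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3)         // evicts "a", the least recently used entry
	fmt.Println(cache.Keys()) // oldest to newest: [b c]
}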
eks-distro-build-tooling
aws
Go
package simplelru import ( "container/list" "errors" ) // EvictCallback is used to get a callback when a cache entry is evicted type EvictCallback func(key interface{}, value interface{}) // LRU implements a non-thread safe fixed size LRU cache type LRU struct { size int evictList *list.List items map[interface{}]*list.Element onEvict EvictCallback } // entry is used to hold a value in the evictList type entry struct { key interface{} value interface{} } // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, evictList: list.New(), items: make(map[interface{}]*list.Element), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { c.onEvict(k, v.Value.(*entry).value) } delete(c.items, k) } c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) ent.Value.(*entry).value = value return false } // Add new item ent := &entry{key, value} entry := c.evictList.PushFront(ent) c.items[key] = entry evict := c.evictList.Len() > c.size // Verify size not exceeded if evict { c.removeOldest() } return evict } // Get looks up a key's value from the cache. func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) return ent.Value.(*entry).value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { var ent *list.Element if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok } // Remove removes the provided key from the cache, returning if the // key was contained. func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true } return false } // RemoveOldest removes the oldest item from the cache. func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // GetOldest returns the oldest entry func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) return kv.key, kv.value, true } return nil, nil, false } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU) Keys() []interface{} { keys := make([]interface{}, len(c.items)) i := 0 for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { keys[i] = ent.Value.(*entry).key i++ } return keys } // Len returns the number of items in the cache. func (c *LRU) Len() int { return c.evictList.Len() } // removeOldest removes the oldest item from the cache. 
func (c *LRU) removeOldest() { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache func (c *LRU) removeElement(e *list.Element) { c.evictList.Remove(e) kv := e.Value.(*entry) delete(c.items, kv.key) if c.onEvict != nil { c.onEvict(kv.key, kv.value) } }
162
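Illustrative usage (not part of the vendored file above): the non-thread-safe simplelru.LRU used directly, showing how Get refreshes recency before an eviction.

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	l, err := simplelru.NewLRU(2, nil)
	if err != nil {
		panic(err)
	}
	l.Add("x", 10)
	l.Add("y", 20)
	l.Get("x")            // touch "x" so it becomes most recently used
	l.Add("z", 30)        // evicts "y", now the oldest entry
	fmt.Println(l.Keys()) // oldest to newest: [x z]
}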
eks-distro-build-tooling
aws
Go
package simplelru // LRUCache is the interface for a simple LRU cache. type LRUCache interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. Add(key, value interface{}) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) // Check if a key exists in the cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. Peek(key interface{}) (value interface{}, ok bool) // Removes a key from the cache. Remove(key interface{}) bool // Removes the oldest entry from the cache. RemoveOldest() (interface{}, interface{}, bool) // Returns the oldest entry from the cache. #key, value, isFound GetOldest() (interface{}, interface{}, bool) // Returns a slice of the keys in the cache, from oldest to newest. Keys() []interface{} // Returns the number of items in the cache. Len() int // Clear all cache entries. Purge() }
37
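Illustrative usage (not part of the vendored file above): accepting the LRUCache interface so callers can pass *simplelru.LRU or a test double; the warm helper is hypothetical.

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

// warm pre-populates any LRUCache implementation (hypothetical helper).
func warm(c simplelru.LRUCache, n int) {
	for i := 0; i < n; i++ {
		c.Add(i, i*i)
	}
}

func main() {
	l, err := simplelru.NewLRU(8, nil)
	if err != nil {
		panic(err)
	}
	warm(l, 16)
	fmt.Println(l.Len()) // 8: older entries were evicted along the way
}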