repo_name
stringlengths 1
52
| repo_creator
stringclasses 6
values | programming_language
stringclasses 4
values | code
stringlengths 0
9.68M
| num_lines
int64 1
234k
|
---|---|---|---|---|
ec2-macos-init | aws | Go | package ec2macosinit
import (
"bufio"
_ "embed"
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
const (
	// ConfigurationManagementWarning is a header warning placed on the first line of sshd_config
	ConfigurationManagementWarning = "### This file is managed by EC2 macOS Init, changes will be applied on every boot. To disable set secureSSHDConfig = false in /usr/local/aws/ec2-macos-init/init.toml ###"
	// InlineWarning is a warning line for each entry to help encourage users to avoid doing the risky configuration change.
	// It is written verbatim (including the trailing newline) above each rewritten sshd_config entry.
	InlineWarning = "# EC2 Configuration: The follow setting is recommended by EC2 and set on boot. Set secureSSHDConfig = false in /usr/local/aws/ec2-macos-init/init.toml to disable.\n"
	// DefaultsCmd is the path to the defaults utility used to edit macOS plist defaults
	DefaultsCmd = "/usr/bin/defaults"
	// DefaultsRead is the command to read from a plist
	DefaultsRead = "read"
	// DefaultsReadType is the command to read the type of a parameter from a plist
	DefaultsReadType = "read-type"
	// DefaultsWrite is the command to write a value of a parameter to a plist
	DefaultsWrite = "write"
	// sshdConfigFile is the default path for the SSHD configuration file
	sshdConfigFile = "/etc/ssh/sshd_config"
	// ec2SSHDConfigFile is the path of the EC2-managed sshd drop-in config file
	ec2SSHDConfigFile = "/etc/ssh/sshd_config.d/050-ec2-macos.conf"
	// macOSSSHDConfigDir is the directory for Apple's custom ssh drop-in configs
	macOSSSHDConfigDir = "/etc/ssh/sshd_config.d"
)
// ec2SSHData is the embedded content of the EC2-managed sshd drop-in config,
// written to ec2SSHDConfigFile at boot.
//go:embed assets/ec2-macos-ssh.txt
var ec2SSHData string

var (
	// numberOfBytesInCustomSSHFile is the number of bytes in assets/ec2-macos-ssh.txt,
	// used to verify the complete file was written to disk.
	numberOfBytesInCustomSSHFile = len(ec2SSHData)
)
// ModifySysctl contains sysctl values we want to modify
type ModifySysctl struct {
	Value string `toml:"value"` // full "param=value" string passed to the sysctl utility
}
// ModifyDefaults contains the necessary values to change a parameter in a given plist
type ModifyDefaults struct {
	Plist     string `toml:"plist"`     // path of the plist file to modify
	Parameter string `toml:"parameter"` // name of the parameter within the plist
	Type      string `toml:"type"`      // parameter type; only "bool"/"boolean" is verified today (see checkDefaultsValue)
	Value     string `toml:"value"`     // desired value, as a string, passed to the defaults utility
}
// SystemConfigModule contains all necessary configuration fields for running a System Configuration module.
type SystemConfigModule struct {
	SecureSSHDConfig bool             `toml:"secureSSHDConfig"` // when true, rewrite sshd_config with hardened settings on boot
	ModifySysctl     []ModifySysctl   `toml:"Sysctl"`           // sysctl "param=value" entries to apply
	ModifyDefaults   []ModifyDefaults `toml:"Defaults"`         // plist defaults to apply via the defaults utility
}
// Do for the SystemConfigModule modifies system configuration such as sysctl, plist defaults, and secures the SSHD
// configuration file.
//
// All requested changes run concurrently; per-category results are tallied with
// atomic counters and rolled up into a single summary message. A non-nil error
// is returned if any individual change failed.
func (c *SystemConfigModule) Do(ctx *ModuleContext) (message string, err error) {
	wg := sync.WaitGroup{}

	// Secure SSHD configuration
	var sshdConfigChanges, sshdUnchanged, sshdErrors int32
	if c.SecureSSHDConfig {
		wg.Add(1)
		go func() {
			err := writeEC2SSHConfigs()
			if err != nil {
				// Bug fix: count this failure so it is reflected in totalErrors below.
				// Previously the error was only logged, so Do() could report success
				// even though the EC2 ssh config file was never written.
				atomic.AddInt32(&sshdErrors, 1)
				ctx.Logger.Errorf("Error writing ec2 custom ssh configs: %s", err)
			}
			wg.Done()
		}()
		wg.Add(1)
		go func() {
			changes, err := c.configureSSHD(ctx)
			if err != nil {
				atomic.AddInt32(&sshdErrors, 1)
				ctx.Logger.Errorf("Error while attempting to correct SSHD configuration: %s", err)
			}
			if changes {
				// Add change for messaging
				atomic.AddInt32(&sshdConfigChanges, 1)
			} else {
				// No changes made
				atomic.AddInt32(&sshdUnchanged, 1)
			}
			wg.Done()
		}()
	}

	// Modifications using sysctl
	var sysctlChanged, sysctlUnchanged, sysctlErrors int32
	for _, m := range c.ModifySysctl {
		wg.Add(1)
		// The value is passed as an argument to avoid capturing the loop variable.
		go func(val string) {
			changed, err := modifySysctl(val)
			if err != nil {
				atomic.AddInt32(&sysctlErrors, 1)
				ctx.Logger.Errorf("Error while attempting to modify sysctl property [%s]: %s", val, err)
			}
			if changed { // changed a property
				atomic.AddInt32(&sysctlChanged, 1)
				ctx.Logger.Infof("Modified sysctl property [%s]", val)
			} else { // did not change a property
				atomic.AddInt32(&sysctlUnchanged, 1)
				ctx.Logger.Infof("Did not modify sysctl property [%s]", val)
			}
			wg.Done()
		}(m.Value)
	}

	// Modifications using defaults
	var defaultsChanged, defaultsUnchanged, defaultsErrors int32
	for _, m := range c.ModifyDefaults {
		wg.Add(1)
		go func(modifyDefault ModifyDefaults) {
			changed, err := modifyDefaults(modifyDefault)
			if err != nil {
				atomic.AddInt32(&defaultsErrors, 1)
				ctx.Logger.Errorf("Error while attempting to modify default [%s]: %s", modifyDefault.Parameter, err)
			}
			if changed { // changed a property
				atomic.AddInt32(&defaultsChanged, 1)
				ctx.Logger.Infof("Modified default [%s]", modifyDefault.Parameter)
			} else { // did not change a property
				atomic.AddInt32(&defaultsUnchanged, 1)
				ctx.Logger.Infof("Did not modify default [%s]", modifyDefault.Parameter)
			}
			wg.Done()
		}(m)
	}

	// Wait for everything to finish
	wg.Wait()

	// Craft output message
	totalChanged := sysctlChanged + defaultsChanged + sshdConfigChanges
	totalUnchanged := sysctlUnchanged + defaultsUnchanged + sshdUnchanged
	totalErrors := sysctlErrors + defaultsErrors + sshdErrors
	baseMessage := fmt.Sprintf("[%d changed / %d unchanged / %d error(s)] out of %d requested changes",
		totalChanged, totalUnchanged, totalErrors, totalChanged+totalUnchanged)
	if totalErrors > 0 {
		return "", fmt.Errorf("one or more system configuration changes were unsuccessful: %s", baseMessage)
	}

	return "system configuration completed with " + baseMessage, nil
}
// writeEC2SSHConfigs writes the embedded EC2 sshd drop-in config file,
// creating the drop-in directory first if needed and verifying the full
// payload reached disk.
func writeEC2SSHConfigs() (err error) {
	if err = os.MkdirAll(macOSSSHDConfigDir, 0755); err != nil {
		return fmt.Errorf("error while attempting to create %s dir: %s", macOSSSHDConfigDir, err)
	}
	configFile, err := os.OpenFile(ec2SSHDConfigFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return fmt.Errorf("error while attempting to create %s file: %s", ec2SSHDConfigFile, err)
	}
	defer configFile.Close()

	written, err := configFile.WriteString(ec2SSHData)
	if err != nil {
		return fmt.Errorf("error while writing ec2-macos ssh data on file: %s. %s", ec2SSHDConfigFile, err)
	}
	// Double-check the byte count matches the embedded asset.
	if written != numberOfBytesInCustomSSHFile {
		return fmt.Errorf("error while writing ec2-macos ssh data on file: %s. %d should equal %d", ec2SSHDConfigFile, written, numberOfBytesInCustomSSHFile)
	}
	return nil
}
// modifySysctl modifies a sysctl parameter, if necessary.
// The input must be of the form "param=value"; it returns whether a change
// was actually applied.
func modifySysctl(value string) (changed bool, err error) {
	// Split out the parameter name from the "param=value" input.
	parts := strings.Split(value, "=")
	if len(parts) != 2 {
		return false, fmt.Errorf("ec2macosinit: unable to split input sysctl value: %s", value)
	}
	param := parts[0]

	// Read the current setting so an already-correct value is a no-op.
	current, err := executeCommand([]string{"sysctl", "-e", param}, "", []string{})
	if err != nil {
		return false, fmt.Errorf("ec2macosinit: unable to get current value from sysctl: %s", err)
	}
	if strings.TrimSpace(current.stdout) == value {
		return false, nil // already at the desired value
	}

	// Attempt to set the value five times, with 100ms in between each attempt
	err = retry(5, 100*time.Millisecond, func() error {
		// Apply the new value
		if _, setErr := executeCommand([]string{"sysctl", value}, "", []string{}); setErr != nil {
			return fmt.Errorf("ec2macosinit: unable to set desired value using sysctl: %s", setErr)
		}
		// Read back to verify the value took effect
		verify, verifyErr := executeCommand([]string{"sysctl", "-e", param}, "", []string{})
		if verifyErr != nil {
			return fmt.Errorf("ec2macosinit: unable to get current value from sysctl: %s", verifyErr)
		}
		if strings.TrimSpace(verify.stdout) != value {
			return fmt.Errorf("ec2macosinit: error setting new value using sysctl: %s", verify.stdout)
		}
		return nil
	})
	if err != nil {
		return false, err
	}
	return true, nil
}
// modifyDefaults modifies a default, if necessary.
// Returns whether the plist was actually changed.
func modifyDefaults(modifyDefault ModifyDefaults) (changed bool, err error) {
	// If the current value already matches, there is nothing to do.
	if checkDefaultsValue(modifyDefault) == nil {
		return false, nil
	}

	// Write the desired value into the plist.
	if err = updateDefaultsValue(modifyDefault); err != nil {
		return false, fmt.Errorf("ec2macosinit: unable to update value for plist %s, parameter %s to value %s", modifyDefault.Plist, modifyDefault.Parameter, modifyDefault.Value)
	}

	// Read the value back to confirm the write took effect.
	if err = checkDefaultsValue(modifyDefault); err != nil {
		return false, fmt.Errorf("ec2macosinit: verification failed for updating value for plist %s, parameter %s", modifyDefault.Plist, modifyDefault.Parameter)
	}
	return true, nil
}
// checkDefaultsValue checks the value for a given parameter in a plist.
// It returns nil when the current value matches the desired one (or when the
// configured type has no comparison implemented), and an error otherwise.
func checkDefaultsValue(modifyDefault ModifyDefaults) (err error) {
	// Read the parameter's current value from the plist.
	out, err := executeCommand([]string{DefaultsCmd, DefaultsRead, modifyDefault.Plist, modifyDefault.Parameter}, "", []string{})
	if err != nil {
		return err
	}
	currentValue := strings.TrimSpace(out.stdout)

	// Compare according to the configured type.
	switch modifyDefault.Type {
	case "bool", "boolean":
		// Only bool[ean] is implemented for now; more types to be implemented later.
		return checkBoolean(modifyDefault.Value, currentValue)
	default:
		// Unknown types are treated as already matching.
		return nil
	}
}
// updateDefaultsValue updates the value of a parameter in a given plist.
func updateDefaultsValue(modifyDefault ModifyDefaults) (err error) {
	// The defaults utility takes the type as a "-<type>" flag preceding the value.
	args := []string{DefaultsCmd, DefaultsWrite, modifyDefault.Plist, modifyDefault.Parameter, "-" + modifyDefault.Type, modifyDefault.Value}
	_, err = executeCommand(args, "", []string{})
	return err
}
// checkBoolean parses both inputs as booleans and compares them.
// It returns nil when they match, a parse error when either value is not a
// valid boolean per strconv.ParseBool, and a mismatch error otherwise.
func checkBoolean(expectedValue, actualValue string) (err error) {
	// Convert our expected value into a boolean
	expectedOutput, err := strconv.ParseBool(expectedValue)
	if err != nil {
		return err
	}
	// Convert our actual value into a boolean
	actualOutput, err := strconv.ParseBool(actualValue)
	if err != nil {
		return err
	}
	// Idiom fix: dropped the redundant else-after-return branch.
	if expectedOutput != actualOutput {
		return fmt.Errorf("ec2macosinit: boolean values did not match - expected: %v, actual: %v", expectedOutput, actualOutput)
	}
	return nil
}
// checkSSHDReturn uses launchctl to find the exit code for ssh.plist and returns if it was successful
func (c *SystemConfigModule) checkSSHDReturn() (success bool, err error) {
	// Launchd can provide status on processes running, this gets that output to be parsed.
	// The command error is deliberately ignored: an empty listing simply falls through
	// to the "not running" return at the bottom.
	out, _ := executeCommand([]string{"launchctl", "list"}, "", []string{})
	// Start a line by line scanner
	scanner := bufio.NewScanner(strings.NewReader(out.stdout))
	for scanner.Scan() {
		// Fetch the next line
		line := scanner.Text()
		// If the line contains "sshd." then the real SSHD is started, not just the dummy sshd wrapper
		if strings.Contains(line, "sshd.") {
			// Strip the newline, then split on tabs to get fields.
			// NOTE(review): launchctlFields[1] panics if the matched line has fewer than two
			// tab-separated fields — assumed safe for launchctl's "PID\tStatus\tLabel" format;
			// confirm against real output.
			launchctlFields := strings.Split(strings.Replace(line, "\n", "", -1), "\t")
			// Take the second field which is the process exit code on start.
			// NOTE(review): ParseBool only accepts 0/1/true/false-style values; a non-binary
			// exit status (or a "-" placeholder in the status column) makes this return an
			// error instead of a status — confirm sshd's status field here is always 0 or 1.
			retValue, err := strconv.ParseBool(launchctlFields[1])
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: failed to get sshd exit code: %s", err)
			}
			// Return true for zero (good exit) otherwise false
			return !retValue, nil
		}
	}
	// If all of "launchctl list" output doesn't have a status, simply return false since its not running
	return false, nil
}
// checkAndWriteWarning is a helper function to write out the warning if not present
// on the previous line of the rewritten sshd_config.
func checkAndWriteWarning(lastLine string, tempSSHDFile *os.File) (err error) {
	// Skip the write when the previous line already carries the EC2 warning.
	alreadyWarned := strings.Contains(lastLine, "EC2 Configuration") || lastLine == InlineWarning
	if alreadyWarned {
		return nil
	}
	if _, err = tempSSHDFile.WriteString(InlineWarning); err != nil {
		return fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
	}
	return nil
}
// configureSSHD scans the SSHConfigFile and writes to a temporary file if changes are detected. If changes are detected
// it replaces the SSHConfigFile. If SSHD is detected as running, it restarts it.
//
// Returns whether any configuration change was applied. Errors are returned to
// the caller (which tallies them), never fatal to the process.
func (c *SystemConfigModule) configureSSHD(ctx *ModuleContext) (configChanges bool, err error) {
	// Look for each thing and fix them if found
	sshdFile, err := os.Open(sshdConfigFile)
	if err != nil {
		// Bug fix: this previously called log.Fatal, which would terminate the entire init
		// process from inside a worker goroutine, skipping deferred cleanup and the caller's
		// error accounting. Log and return the error instead.
		log.Printf("ec2macosinit: unable to open %s: %s", sshdConfigFile, err)
		return false, fmt.Errorf("ec2macosinit: unable to open %s: %s", sshdConfigFile, err)
	}
	defer sshdFile.Close()

	// Create scanner for the SSHD file
	scanner := bufio.NewScanner(sshdFile)

	// Create a new temporary file, if changes are detected, it will be moved over the existing file
	tempSSHDFile, err := os.CreateTemp("", "sshd_config_fixed.*")
	if err != nil {
		// Bug fix: tempSSHDFile is nil when CreateTemp fails, so the previous message's
		// tempSSHDFile.Name() call would panic with a nil pointer dereference.
		return false, fmt.Errorf("ec2macosinit: error creating temporary sshd config file: %s", err)
	}
	defer tempSSHDFile.Close()

	// Keep track of line number simply for confirming warning header
	var lineNumber int
	// Track the last line for adding in warning when needed
	var lastLine string
	// Iterate over every line in the file
	for scanner.Scan() {
		lineNumber++
		currentLine := scanner.Text()
		// If this is the first line in the file, look for the warning header and add if missing
		if lineNumber == 1 && currentLine != ConfigurationManagementWarning {
			_, err = tempSSHDFile.WriteString(ConfigurationManagementWarning + "\n")
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			configChanges = true
			lastLine = ConfigurationManagementWarning
		}
		switch {
		// Check if PasswordAuthentication is enabled, if so put in warning and change the config
		// PasswordAuthentication allows SSHD to respond to user password brute force attacks and can result in lowered
		// security, especially if a simple password is set. In EC2, this is undesired and therefore turned off by default
		case strings.Contains(currentLine, "PasswordAuthentication yes"):
			err = checkAndWriteWarning(lastLine, tempSSHDFile)
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			// Overwrite with desired configuration line
			_, err = tempSSHDFile.WriteString("PasswordAuthentication no\n")
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			// Changes detected so this will enforce updating the file later
			configChanges = true
		// Check if PAM is enabled, if so, put in warning and change the config
		// PAM authentication enables challenge-response authentication which can allow brute force attacks on SSHD
		// In EC2, this is undesired and therefore turned off by default
		case strings.TrimSpace(currentLine) == "UsePAM yes":
			err = checkAndWriteWarning(lastLine, tempSSHDFile)
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			// Overwrite with desired configuration line
			_, err = tempSSHDFile.WriteString("UsePAM no\n")
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			// Changes detected so this will enforce updating the file later
			configChanges = true
		// Check if Challenge-response is enabled, if so put in warning and change the config
		// Challenge-response authentication via SSHD can allow brute force attacks for SSHD. In EC2, this is undesired
		// and therefore turned off by default
		case strings.Contains(currentLine, "ChallengeResponseAuthentication yes"):
			err = checkAndWriteWarning(lastLine, tempSSHDFile)
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			// Overwrite with desired configuration line
			_, err = tempSSHDFile.WriteString("ChallengeResponseAuthentication no\n")
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
			// Changes detected so this will enforce updating the file later
			configChanges = true
		default:
			// Otherwise write the line as is to the temp file without modification
			_, err = tempSSHDFile.WriteString(currentLine + "\n")
			if err != nil {
				return false, fmt.Errorf("ec2macosinit: error writing to %s", tempSSHDFile.Name())
			}
		}
		// Rotate the current line to the last line so that comments can be inserted above rewritten lines
		lastLine = currentLine
	}
	if err := scanner.Err(); err != nil {
		return false, fmt.Errorf("ec2macosinit: error reading %s: %s", sshdConfigFile, err)
	}

	// If there was a change detected, then copy the file and restart sshd
	if configChanges {
		// Get the current status of SSHD, if its not running, then it should not be started
		sshdRunning, err := c.checkSSHDReturn()
		if err != nil {
			ctx.Logger.Errorf("ec2macosinit: unable to get SSHD status: %s", err)
		}
		// Move the temporary file to the SSHDConfigFile
		err = os.Rename(tempSSHDFile.Name(), sshdConfigFile)
		if err != nil {
			return false, fmt.Errorf("ec2macosinit: unable to save updated configuration to %s", sshdConfigFile)
		}
		// Temporary files have different permissions by design, correct the permissions for SSHDConfigFile
		err = os.Chmod(sshdConfigFile, 0644)
		if err != nil {
			return false, fmt.Errorf("ec2macosinit: unable to set correct permssions of %s", sshdConfigFile)
		}
		// If SSHD was detected as running, then a restart must happen, if it was not running, the work is complete
		if sshdRunning {
			// Unload and load SSHD, the launchctl method for re-loading SSHD with new configuration
			_, err = executeCommand([]string{"/bin/zsh", "-c", "launchctl unload /System/Library/LaunchDaemons/ssh.plist"}, "", []string{})
			if err != nil {
				ctx.Logger.Errorf("ec2macosinit: unable to stop SSHD %s", err)
				return false, fmt.Errorf("ec2macosinit: unable to stop SSHD %s", err)
			}
			_, err = executeCommand([]string{"/bin/zsh", "-c", "launchctl load -w /System/Library/LaunchDaemons/ssh.plist"}, "", []string{})
			if err != nil {
				ctx.Logger.Errorf("ec2macosinit: unable to restart SSHD %s", err)
				return false, fmt.Errorf("ec2macosinit: unable to restart SSHD %s", err)
			}
			// Add the message to state that config was modified and SSHD was correctly restarted
			ctx.Logger.Info("Modified SSHD configuration and restarted SSHD for new configuration")
		} else {
			// Since SSHD was not running, only change the configuration but no restarting is desired
			ctx.Logger.Info("Modified SSHD configuration, did not restart SSHD since it was not running")
		}
	} else {
		// There were no changes detected from desired state, simply exit and let the temp file be
		ctx.Logger.Info("Did not modify SSHD configuration")
	}
	// Return the message to caller for logging
	return configChanges, nil
}
| 479 |
ec2-macos-init | aws | Go | package ec2macosinit
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
)
// UserDataModule contains all necessary configuration fields for running a User Data module.
type UserDataModule struct {
	// ExecuteUserData must be set to `true` for the userdata script contents to
	// be executed. When false, the script is still fetched and written to the
	// instance history directory, just never run.
	ExecuteUserData bool `toml:"ExecuteUserData"`
}
// Do fetches userdata and writes it to a file in the instance history. The
// written script is then executed when ExecuteUserData is true.
func (m *UserDataModule) Do(mctx *ModuleContext) (message string, err error) {
	const scriptFileName = "userdata"
	scriptPath := filepath.Join(mctx.InstanceHistoryPath(), scriptFileName)

	// Fetch the userdata payload from IMDS.
	ud, respCode, err := mctx.IMDS.getIMDSProperty("user-data")
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: error getting user data from IMDS: %s\n", err)
	}
	switch respCode {
	case 404: // no user data provided, exit nicely
		return "no user data provided through IMDS", nil
	case 200: // ok, continue below
	default:
		return "", fmt.Errorf("ec2macosinit: received an unexpected response code from IMDS: %d - %s\n", respCode, err)
	}

	// Persist the (possibly base64-encoded) payload as an executable script.
	if err = writeShellScript(scriptPath, userdataReader(ud)); err != nil {
		return "", fmt.Errorf("userdata script: %w", err)
	}

	// If execution was not requested, persisting the script is all there is to do.
	if !m.ExecuteUserData {
		return "successfully handled user data with no execution request", nil
	}

	// Execute user data script and report its output.
	out, err := executeCommand([]string{scriptPath}, "", []string{})
	if err == nil {
		return fmt.Sprintf("successfully ran user data with stdout: [%s] and stderr: [%s]", out.stdout, out.stderr), nil
	}
	if strings.Contains(err.Error(), "exec format error") {
		// Not an executable script (e.g. a binary or data blob) — report but do not fail.
		contentType := http.DetectContentType([]byte(ud))
		return fmt.Sprintf("provided user data is not executable (detected type: %s)", contentType), nil
	}
	return fmt.Sprintf("error while running user data with stdout: [%s] and stderr: [%s]", out.stdout, out.stderr), err
}
// userdataReader provides a decoded reader for the provided userdata text.
// Userdata text may be encoded either as plain text or as base64 encoded plain
// text, so we detect and prepare a reader depending on what's given.
func userdataReader(text string) io.Reader {
	// Attempt to base64 decode userdata first.
	//
	// This maintains consistency alongside Amazon Linux 2's cloud-init, which states:
	//
	//   "Some tools and users will base64 encode their data before handing it to
	//   an API like boto, which will base64 encode it again, so we try to decode."
	//
	if decoded, err := base64.StdEncoding.DecodeString(text); err == nil {
		return bytes.NewBuffer(decoded)
	}
	// Not valid base64 — treat the text as plain.
	return bytes.NewBufferString(text)
}
// writeShellScript writes an executable (0755) file at the provided path,
// streaming its contents from rd. Any existing file is truncated.
func writeShellScript(path string, rd io.Reader) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		return err
	}
	if _, copyErr := io.Copy(f, rd); copyErr != nil {
		// Best-effort close; the copy error is the one worth reporting.
		_ = f.Close()
		return fmt.Errorf("write contents: %w", copyErr)
	}
	// Close errors matter here: they can indicate a failed flush to disk.
	return f.Close()
}
| 97 |
ec2-macos-init | aws | Go | package ec2macosinit
import (
"fmt"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
// TestUserdataReader_ValidTexts verifies that userdataReader yields the same
// decoded content whether the input is base64-encoded or plain text.
func TestUserdataReader_ValidTexts(t *testing.T) {
	const expected = "hello, world!"
	texts := []string{
		"aGVsbG8sIHdvcmxkIQ==", // printf 'hello, world!' | base64 -w0
		"hello, world!",        // plain text should pass through unchanged
	}
	for i, text := range texts {
		t.Run(fmt.Sprintf("Text_%d", i), func(t *testing.T) {
			t.Logf("input: %q", text)
			actual, err := io.ReadAll(userdataReader(text))
			assert.NoError(t, err, "should prepare a reader")
			assert.Equal(t, expected, string(actual), "should decode valid texts")
		})
	}
}
| 27 |
ec2-macos-init | aws | Go | package ec2macosinit
import (
"crypto/rand"
"encoding/base64"
"fmt"
"path/filepath"
"strings"
)
const (
	// PasswordLength is the default number of characters that the auto-generated password should be
	PasswordLength = 25
	// DsclPath is the default path for the dscl (Directory Service command line) utility needed for the functions in this file
	DsclPath = "/usr/bin/dscl"
)
// UserManagementModule contains the necessary values to run a User Management Module
type UserManagementModule struct {
	RandomizePassword bool   `toml:"RandomizePassword"` // when true, the user's password is randomized on boot
	User              string `toml:"User"`              // target user; defaults to "ec2-user" when empty (see randomizePassword)
}
// Do for the UserManagementModule is the primary entry point for the User Management Module.
// Currently the only supported action is password randomization.
func (c *UserManagementModule) Do(ctx *ModuleContext) (message string, err error) {
	// Nothing to do unless randomization was requested.
	if !c.RandomizePassword {
		return "randomizing password disabled, skipping", nil
	}
	message, err = c.randomizePassword()
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: failed to randomize password: %s", err)
	}
	// Returned here (rather than inline above) for readability and future additions to the module.
	return message, nil
}
// isSecureTokenSet wraps the sysadminctl call to provide a bool for checking if its enabled
// The way to detect if the Secure Token is set for a user is `sysadminctl`, here is an example for ec2-user:
// /usr/sbin/sysadminctl -secureTokenStatus ec2-user
// 2021-01-14 18:17:47.414 sysadminctl[96836:904874] Secure token is DISABLED for user ec2-user
// When enabled it shows:
// 2021-01-14 19:21:55.854 sysadminctl[14193:181530] Secure token is ENABLED for user ec2-user
func (c *UserManagementModule) isSecureTokenSet() (enabled bool, err error) {
	// Ask the built-in sysadminctl tool for the user's Secure Token status.
	status, err := executeCommand([]string{"/usr/sbin/sysadminctl", "-secureTokenStatus", c.User}, "", []string{})
	if err != nil {
		return false, fmt.Errorf("ec2macosinit: unable to get Secure Token status for %s: %s", c.User, err)
	}
	// Enabled if and only if the output contains the ENABLED marker.
	return strings.Contains(status.stdout, "Secure token is ENABLED"), nil
}
// disableSecureTokenCreation disables the default behavior to enable the Secure Token on the next user password change.
// From https://support.apple.com/guide/deployment-reference-macos/using-secure-and-bootstrap-tokens-apdff2cf769b/web
// This is the command used to avoid setting the SecureToken when changing the password
// /usr/bin/dscl . append /Users/ec2-user AuthenticationAuthority ";DisabledTags;SecureToken"
func (c *UserManagementModule) disableSecureTokenCreation() (err error) {
	// NOTE(review): filepath.Join("Users", c.User) yields "Users/<user>" without the leading
	// slash shown in the documented dscl command above — confirm dscl accepts the relative form.
	_, err = executeCommand([]string{DsclPath, ".", "append", filepath.Join("Users", c.User), "AuthenticationAuthority", ";DisabledTags;SecureToken"}, "", []string{})
	if err != nil {
		// Bug fix: error message previously read "failed disable" (missing "to").
		return fmt.Errorf("ec2macosinit: failed to disable Secure Token creation: %s", err)
	}
	return nil
}
// enableSecureTokenCreation enables the default behavior to enable the Secure Token on the next user password change.
// From https://support.apple.com/guide/deployment-reference-macos/using-secure-and-bootstrap-tokens-apdff2cf769b/web
// This is the command used to remove the setting for the SecureToken when changing the password
// /usr/bin/dscl . delete /Users/ec2-user AuthenticationAuthority ";DisabledTags;SecureToken"
func (c *UserManagementModule) enableSecureTokenCreation() (err error) {
	_, err = executeCommand([]string{DsclPath, ".", "delete", filepath.Join("Users", c.User), "AuthenticationAuthority", ";DisabledTags;SecureToken"}, "", []string{})
	if err != nil {
		// Bug fix: the error message previously said "disable", but this function re-ENABLES
		// Secure Token creation — the misleading text would send a debugger the wrong way.
		return fmt.Errorf("ec2macosinit: failed to enable Secure Token creation: %s", err)
	}
	return nil
}
// changePassword changes the password to a provided string.
func (c *UserManagementModule) changePassword(password string) (err error) {
	cmd := []string{DsclPath, ".", "-passwd", filepath.Join("Users", c.User), password}
	if _, err = executeCommand(cmd, "", []string{}); err != nil {
		return fmt.Errorf("ec2macosinit: failed to set %s's password: %s", c.User, err)
	}
	return nil
}
// randomizePassword confirms if the Secure Token is set and randomizes the user password.
// The password change functionality, at its core, is simply detecting if the user password can be randomized for
// the default "ec2-user" user. The complexity comes in when dealing with the Secure Token. From Big Sur onward, the
// Secure Token is set on all initial password changes, this is not ideal since future password changes would require
// knowing this random password. This process is built to avoid the Secure Token being set on this first randomization.
// The basic flow is:
// 1. Check for the Secure Token already being set which would prevent changing the password
// 2. Add a special property to avoid the Secure Token from being set
// 3. Change the password to a random string
// 4. Undo the special property so that the next password change will set the Secure Token
func (c *UserManagementModule) randomizePassword() (message string, err error) {
	// This detection of the user probably needs to move into the Do() function when there is more to do, but since this
	// is the first place the c.User is used, its handled here
	// If user is undefined, default to ec2-user
	if c.User == "" {
		c.User = "ec2-user"
	}
	// Verify that user exists
	exists, err := userExists(c.User)
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: error while checking if user %s exists: %s\n", c.User, err)
	}
	if !exists { // if the user doesn't exist, error out
		return "", fmt.Errorf("ec2macosinit: user %s does not exist\n", c.User)
	}
	// Check for Secure Token, if its already set then attempting to change the password will fail
	secureTokenSet, err := c.isSecureTokenSet()
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: unable to confirm Secure Token is DISABLED: %s", err)
	}
	// Only proceed if user doesn't have Secure Token enabled
	if secureTokenSet {
		return "", fmt.Errorf("ec2macosinit: unable to change password, Secure Token Set for %s", c.User)
	}
	// Change Secure Token behavior if needed (step 2 of the flow above)
	err = c.disableSecureTokenCreation()
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: unable to disable Secure Token generation: %s", err)
	}
	// NOTE: this deferred function intentionally writes to the named return values
	// (message, err) so that a failure to restore Secure Token behavior is surfaced
	// even when the password change itself succeeded.
	defer func() {
		// Set Secure Token behavior back if needed (step 4 of the flow above)
		deferErr := c.enableSecureTokenCreation()
		if deferErr != nil {
			// Catch a failure and change status returns to represent an error condition
			message = "" // Overwrite new message to indicate error
			err = fmt.Errorf("ec2macosinit: unable to enable Secure Token generation: %s %s", deferErr, err)
		}
	}()
	// Generate random password
	password, err := generateSecurePassword(PasswordLength)
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: unable to generate secure password: %s", err)
	}
	// Change the password (step 3 of the flow above)
	err = c.changePassword(password)
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: unable to set secure password: %s", err)
	}
	return fmt.Sprintf("successfully set secure password for %s", c.User), nil
}
// generateRandomBytes returns securely generated random bytes for use in generating a password.
// It will return an error if the system's secure random number generator fails to function correctly.
func generateRandomBytes(n int) ([]byte, error) {
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		return nil, fmt.Errorf("ec2macosinit: unable to read random bytes from OS: %s", err)
	}
	return buf, nil
}
// generateSecurePassword generates a password securely for use in randomizePassword using the crypto/rand library.
// The result is exactly length characters drawn from the URL-safe base64 alphabet.
func generateSecurePassword(length int) (password string, err error) {
	// Request length raw bytes so the result carries at least that much entropy.
	randomBytes, err := generateRandomBytes(length)
	if err != nil {
		return "", fmt.Errorf("ec2macosinit: unable to generate secure password %s", err)
	}
	// URL-safe base64 keeps the characters password-friendly; encoding expands the
	// input, so truncate back down to the requested length.
	encoded := base64.URLEncoding.EncodeToString(randomBytes)
	return encoded[0:length], nil
}
| 186 |
ec2-macos-init | aws | Go | package ec2macosinit
import (
"testing"
)
// TestUserManagementModule_Do exercises Do's two environment-independent paths:
// the skip path when randomization is disabled, and the error path for a user
// that cannot exist in the local directory service.
func TestUserManagementModule_Do(t *testing.T) {
	var emptyCtx ModuleContext // Do does not touch ctx on these paths
	type fields struct {
		RandomizePassword bool
		User              string
	}
	type args struct {
		ctx *ModuleContext
	}
	tests := []struct {
		name        string
		fields      fields
		args        args
		wantMessage string
		wantErr     bool
	}{
		{"No Randomization", fields{RandomizePassword: false, User: "ec2-user"}, args{&emptyCtx}, "randomizing password disabled, skipping", false},
		{"User doesn't exist", fields{RandomizePassword: true, User: "thereisnowaythisusercouldexist"}, args{&emptyCtx}, "", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &UserManagementModule{
				RandomizePassword: tt.fields.RandomizePassword,
				User:              tt.fields.User,
			}
			gotMessage, err := c.Do(tt.args.ctx)
			if (err != nil) != tt.wantErr {
				t.Errorf("Do() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if gotMessage != tt.wantMessage {
				t.Errorf("Do() gotMessage = %v, want %v", gotMessage, tt.wantMessage)
			}
		})
	}
}
// Test_generateRandomBytes checks that generateRandomBytes returns a slice of
// exactly the requested length (the contents are random, so only the length
// can be asserted).
func Test_generateRandomBytes(t *testing.T) {
	type args struct {
		n int
	}
	tests := []struct {
		name          string
		args          args
		exampleResult []byte // only the length of this is meaningful
		wantErr       bool
	}{
		{"Basic case", args{18}, []byte{'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', '=', '='}, false},
		{"Slice of 1", args{1}, []byte{'A'}, false},
		{"Empty case", args{0}, []byte{}, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := generateRandomBytes(tt.args.n)
			if (err != nil) != tt.wantErr {
				t.Errorf("generateRandomBytes() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Check that length is correct, since its random, no more can be done without adjusting seeding
			if len(got) != len(tt.exampleResult) {
				t.Errorf("generateRandomBytes() length of got = %d, want %d", len(got), len(tt.exampleResult))
			}
		})
	}
}
// Test_generateSecurePassword checks that generated passwords have the
// requested length and that repeated calls do not collide (a collision would
// indicate broken entropy sourcing).
func Test_generateSecurePassword(t *testing.T) {
	type args struct {
		length int
	}
	tests := []struct {
		name            string
		args            args
		examplePassword string // only the length of this is meaningful
		wantErr         bool
	}{
		// Randomly create some tests, run the same one over and over to ensure seeding is working
		{"Basic case 1", args{25}, "Qfmk0rD8HAq3zZD37hvs41234", false},
		{"Basic case 2", args{25}, "5iWL3MoeSTQ0ILk4hC4s43214", false},
		{"Basic case 3", args{25}, "y0pFxuh_sTp1qhp_WCv3w4afd", false},
		{"Basic case 4", args{25}, "q2yogfL6JCDntj9cYfdszda35", false},
		{"Basic case 5", args{25}, "TI29Yhy32f3tZtsj42q34rCgG", false},
		{"Basic case 6", args{25}, "4Y0FGvwsFcCm-2QtadfzR9324", false},
		{"Short password", args{6}, "Ad8-3S", false},
	}
	// Build a map that will detect duplicates
	repeatedResults := make(map[string]bool)
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotPassword, err := generateSecurePassword(tt.args.length)
			if (err != nil) != tt.wantErr {
				t.Errorf("generateSecurePassword() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Check that length is correct, since its random, no more can be done without adjusting seeding
			if len(gotPassword) != len(tt.examplePassword) {
				t.Errorf("generateSecurePassword() length of gotPassword = %d, wantPassword %v", len(gotPassword), len(tt.examplePassword))
			}
			// Add to the map for detecting duplicates
			repeatedResults[gotPassword] = true
		})
	}
	// Fail if there are fewer deduplicated passwords than tests
	if len(repeatedResults) < len(tests) {
		t.Errorf("generateSecurePassword() collision detected: length of unique passwords: %d, number of tests: %d", len(repeatedResults), len(tests))
	}
}
| 115 |
ec2-macos-init | aws | Go | package ec2macosinit
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"syscall"
"time"
)
// ioReadCloserToString converts an io.ReadCloser to a string.
func ioReadCloserToString(iorc io.ReadCloser) (str string, err error) {
buf := new(bytes.Buffer)
_, err = buf.ReadFrom(iorc)
if err != nil {
return "", err
}
return buf.String(), nil
}
// commandOutput wraps the output from an exec command as strings.
type commandOutput struct {
	stdout string // complete stdout captured from the command
	stderr string // complete stderr captured from the command
}
// executeCommand executes the command and returns stdout and stderr as strings.
// The command runs as the current (root) user unless runAsUser is non-empty,
// in which case the child's credentials are set to that user's UID/GID.
// envVars are appended to the inherited process environment.
func executeCommand(c []string, runAsUser string, envVars []string) (output commandOutput, err error) {
	// Validate the input slice before splitting it into name and arguments.
	switch {
	case len(c) == 0: // empty struct case ([]string{})
		return commandOutput{}, fmt.Errorf("ec2macosinit: must provide a command")
	case len(c) == 1 && c[0] == "": // empty string case ("")
		return commandOutput{}, fmt.Errorf("ec2macosinit: must provide a command")
	}
	name, args := c[0], c[1:]

	cmd := exec.Command(name, args...)

	// Capture stdout and stderr into separate buffers.
	var outBuf, errBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf

	// Drop privileges to runAsUser when requested; otherwise inherit (root).
	if runAsUser != "" {
		uid, gid, lookupErr := getUIDandGID(runAsUser)
		if lookupErr != nil {
			return commandOutput{}, fmt.Errorf("ec2macosinit: error looking up user: %s\n", lookupErr)
		}
		cmd.SysProcAttr = &syscall.SysProcAttr{
			Credential: &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)},
		}
	}

	// Extend the inherited environment with any caller-supplied variables.
	cmd.Env = append(os.Environ(), envVars...)

	// Run and return whatever was captured, even on failure, so callers can
	// inspect partial output alongside the error.
	err = cmd.Run()
	return commandOutput{stdout: outBuf.String(), stderr: errBuf.String()}, err
}
// getUIDandGID takes a username and returns the uid and gid for that user.
// While testing UID/GID lookup for a user, it was found that the user.Lookup() function does not always return
// information for a new user on first boot. In the case that user.Lookup() fails, we try dscacheutil, which has a
// higher success rate. If that fails, we return an error. Any successful case returns the UID and GID as ints.
func getUIDandGID(username string) (uid int, gid int, err error) {
	var uidstr, gidstr string
	// Preference is user.Lookup(), if it works
	u, lookuperr := user.Lookup(username)
	if lookuperr != nil {
		// user.Lookup() has failed, second try by checking the DS cache
		out, cmderr := executeCommand([]string{"dscacheutil", "-q", "user", "-a", "name", username}, "", []string{})
		if cmderr != nil {
			// dscacheutil has failed with an error; report both failures so the
			// caller can see why each lookup path was unable to resolve the user
			return 0, 0, fmt.Errorf("ec2macosinit: error while looking up user %s: \n"+
				"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil stderr: %s\n",
				username, lookuperr, cmderr, out.stderr)
		}
		// Check length of stdout - dscacheutil returns nothing if user is not found
		if len(out.stdout) > 0 { // dscacheutil has returned something
			// Command output from dscacheutil should look like:
			// name: ec2-user
			// password: ********
			// uid: 501
			// gid: 20
			// dir: /Users/ec2-user
			// shell: /bin/bash
			// gecos: ec2-user
			dsSplit := strings.Split(out.stdout, "\n") // split on newline to separate uid and gid
			for _, e := range dsSplit {
				eSplit := strings.Fields(e) // split into fields to separate tag with id
				// Find UID and GID and set them
				if strings.HasPrefix(e, "uid") {
					// Expect exactly two fields: "uid:" and the number itself
					if len(eSplit) != 2 {
						// dscacheutil has returned some sort of weird output that can't be split
						return 0, 0, fmt.Errorf("ec2macosinit: error while splitting dscacheutil uid output for user %s: %s\n"+
							"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil stderr: %s\n",
							username, out.stdout, lookuperr, cmderr, out.stderr)
					}
					uidstr = eSplit[1]
				} else if strings.HasPrefix(e, "gid") {
					// Expect exactly two fields: "gid:" and the number itself
					if len(eSplit) != 2 {
						// dscacheutil has returned some sort of weird output that can't be split
						return 0, 0, fmt.Errorf("ec2macosinit: error while splitting dscacheutil gid output for user %s: %s\n"+
							"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil stderr: %s\n",
							username, out.stdout, lookuperr, cmderr, out.stderr)
					}
					gidstr = eSplit[1]
				}
			}
		} else {
			// dscacheutil has returned nothing, user is not found
			return 0, 0, fmt.Errorf("ec2macosinit: user %s not found: \n"+
				"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil stderr: %s\n",
				username, lookuperr, cmderr, out.stderr)
		}
	} else {
		// user.Lookup() was successful, use the returned UID/GID
		uidstr = u.Uid
		gidstr = u.Gid
	}
	// Convert UID and GID to int (both lookup paths produce decimal strings)
	uid, err = strconv.Atoi(uidstr)
	if err != nil {
		return 0, 0, fmt.Errorf("ec2macosinit: error while converting UID to int: %s\n", err)
	}
	gid, err = strconv.Atoi(gidstr)
	if err != nil {
		return 0, 0, fmt.Errorf("ec2macosinit: error while converting GID to int: %s\n", err)
	}
	return uid, gid, nil
}
// userExists takes a username and returns whether or not the user exists on
// the system, as reported by dscacheutil.
func userExists(username string) (exists bool, err error) {
	out, err := executeCommand([]string{"dscacheutil", "-q", "user", "-a", "name", username}, "", []string{})
	if err != nil {
		return false, fmt.Errorf("ec2macosinit: error while checking dscacheutil for user %s: %s\n", username, err)
	}
	// dscacheutil prints a record containing the username when the user
	// exists and prints nothing at all when it does not.
	return strings.Contains(out.stdout, username), nil
}
// retry is an extremely simple retry function which waits a specified duration on error and retries.
func retry(attempts int, sleep time.Duration, f func() error) (err error) {
for i := 0; ; i++ {
err = f()
if err == nil {
return
}
if i >= (attempts - 1) {
break
}
time.Sleep(sleep)
}
return fmt.Errorf("after %d attempts, last error: %s", attempts, err)
}
// getOSProductVersion uses the sysctl command to retrieve the product version
// number from the kernel.
func getOSProductVersion() (version string, err error) {
	out, err := executeCommand([]string{"sysctl", "-n", "kern.osproductversion"}, "", []string{})
	if err != nil {
		return version, fmt.Errorf("ec2macosinit: error getting kernel state for product version: %s", err)
	}
	// sysctl output includes trailing whitespace/newline; trim it so only the
	// product version number remains.
	return strings.TrimSpace(out.stdout), nil
}
| 197 |
ec2-macos-init | aws | Go | package ec2macosinit
import (
"fmt"
"io"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Test_ioReadCloserToString round-trips a string through a no-op ReadCloser
// and verifies the helper returns it unchanged without error.
func Test_ioReadCloserToString(t *testing.T) {
	const want = "test string"
	got, err := ioReadCloserToString(io.NopCloser(strings.NewReader(want)))
	assert.NoError(t, err)
	assert.Equal(t, want, got)
}
// Test_retry covers the two outcomes of the retry helper: a function that
// succeeds immediately and a function that fails on every attempt.
func Test_retry(t *testing.T) {
	cases := []struct {
		name     string
		attempts int
		sleep    time.Duration
		f        func() error
		wantErr  bool
	}{
		{
			name:     "FunctionWithNoError",
			attempts: 2,
			sleep:    time.Nanosecond,
			f:        func() error { return nil },
			wantErr:  false,
		},
		{
			name:     "FunctionWithError",
			attempts: 2,
			sleep:    time.Nanosecond,
			f:        func() error { return fmt.Errorf("test error") },
			wantErr:  true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := retry(tc.attempts, tc.sleep, tc.f)
			if (err != nil) != tc.wantErr {
				t.Errorf("retry() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}
| 64 |
ec2-macos-system-monitor | aws | Go | package main
import (
"flag"
"log"
"os"
"os/signal"
"strconv"
"sync/atomic"
"syscall"
"time"
ec2sm "github.com/aws/ec2-macos-system-monitor/lib/ec2macossystemmonitor"
)
// pollInterval is the duration in between gathering of CPU metrics
const pollInterval = 60 * time.Second
// DefaultSerialDevice is the default serial device attached to mac1.metal instances for communication
// This device is able to receive various payloads when encapsulated in json
const DefaultSerialDevice = "/dev/cu.pci-serial0"
// main starts the serial relay server in a goroutine, then loops forever
// gathering CPU utilization every pollInterval and forwarding it to the
// relay, flushing summary log lines on a fixed timer and on SIGINT/SIGTERM.
//
// relayStatus.Written is incremented by the relay goroutine (via
// atomic.AddInt64 in StartRelay), so every access from this goroutine must
// also go through sync/atomic — the previous plain reads were a data race,
// and the separate load-then-store-zero could silently drop counts.
func main() {
	disableSyslog := flag.Bool("disable-syslog", false, "Prevent log output to syslog")
	flag.Parse()

	logger, err := ec2sm.NewLogger("ec2monitoring-cpuutilization", !*disableSyslog, true)
	if err != nil {
		log.Fatalf("Failed to create logger: %s", err)
	}
	logger.Infof("Starting up relayd for monitoring\n")

	relay, err := ec2sm.NewRelay(DefaultSerialDevice)
	if err != nil {
		log.Fatalf("Failed to create relay: %s", err)
	}

	// Pre-build the summary format strings; each takes a %d byte count.
	intervalString := strconv.Itoa(ec2sm.DefaultLogInterval)
	cpuStatus := ec2sm.StatusLogBuffer{Message: "Sent CPU Utilization (%d bytes) over " + intervalString + " minute(s)", Written: 0}
	relayStatus := ec2sm.StatusLogBuffer{Message: "[relayd] Received data and sent %d bytes to serial device over " + intervalString + " minutes", Written: 0}

	// Kick off Relay in a go routine; it updates relayStatus.Written atomically.
	go relay.StartRelay(logger, &relayStatus)

	// Setup signal handling into a channel, catch SIGINT and SIGTERM for now which should suffice for launchd
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

	// Setup the polling channel for kicking off CPU metrics gathering
	pollingCh := time.Tick(pollInterval)
	// Setup logging interval timer for flushing logs
	loggingTimer := time.Tick(ec2sm.DefaultLogInterval * time.Minute)

	// Check if the socket is there, if not, warn that this might fail
	if !ec2sm.CheckSocketExists() {
		logger.Fatal("Socket does not exist, relayd may not be running")
	}

	// Main for loop that polls for signals and CPU ticks
	for {
		select {
		case sig := <-signals:
			// Flush any outstanding totals before shutting down. cpuStatus is
			// local to this goroutine; relayStatus must be read atomically.
			if cpuStatus.Written > 0 {
				logger.Infof(cpuStatus.Message, cpuStatus.Written)
			}
			if relayWritten := atomic.LoadInt64(&relayStatus.Written); relayWritten > 0 {
				logger.Infof(relayStatus.Message, relayWritten)
			}
			log.Println("exiting due to signal:", sig)
			// Send signal to relay server through channel to shutdown
			relay.ReadyToClose <- true
			// Exit cleanly
			os.Exit(0)
		case <-pollingCh:
			// Fetch the current CPU Utilization
			cpuUtilization, err := ec2sm.RunningCpuUsage()
			if err != nil {
				logger.Fatalf("Unable to get CPU Utilization: %s\n", err)
			}
			// Send the data to the relay
			written, err := ec2sm.SendMessage("cpuutil", cpuUtilization, false)
			if err != nil {
				logger.Fatalf("Unable to write message to relay: %s", err)
			}
			// Add current written values to running total cpuStatus.Written
			cpuStatus.Written += int64(written)
		case <-loggingTimer:
			// Flush the logs since the timer fired. cpuStatus is local to this
			// goroutine, so plain access is fine.
			logger.Infof(cpuStatus.Message, cpuStatus.Written)
			cpuStatus.Written = 0
			// relayStatus is shared with the relay goroutine: atomically swap
			// in zero so no bytes counted between a read and a reset are lost.
			logger.Infof(relayStatus.Message, atomic.SwapInt64(&relayStatus.Written, 0))
		}
	}
}
| 100 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"fmt"
"github.com/shirou/gopsutil/cpu"
"strconv"
)
// RunningCpuUsage gathers the value expected for CloudWatch but allows long
// running measurement. This is intended for usage where repeated calls will
// take place.
func RunningCpuUsage() (s string, err error) {
	// Interval 0 with percpu=false yields a single aggregate utilization
	// figure (gopsutil measures relative to the previous call in this mode).
	pct, err := cpu.Percent(0, false)
	if err != nil {
		return "", fmt.Errorf("ec2macossystemmonitor: error while getting cpu stats: %s", err)
	}
	// Format with the fewest digits that round-trip the float exactly.
	return strconv.FormatFloat(pct[0], 'f', -1, 64), nil
}
| 18 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"fmt"
"log"
"log/syslog"
"os"
)
// Logger contains booleans for where to log, a tag used in syslog and the syslog Writer itself.
type Logger struct {
	LogToStdout    bool   // mirror log lines to stdout via the standard log package
	LogToSystemLog bool   // mirror log lines to syslog
	Tag            string // tag passed to syslog when system logging is enabled
	// SystemLog is the syslog writer used when LogToSystemLog is set.
	// NOTE(review): held by value, so copying a Logger copies the writer's
	// internal state — confirm Loggers are only ever shared by pointer.
	SystemLog syslog.Writer
}

// DefaultLogInterval is the interval threshold used for summary logging
// (10 = once every 10 minutes); callers also use it as a minutes-based flush
// timer for StatusLogBuffer totals.
const DefaultLogInterval = 10

// StatusLogBuffer contains a message format string and a written bytes count for this format string for flushing the logs
type StatusLogBuffer struct {
	Message string // format string expecting one %d verb for the byte count
	Written int64  // running byte total; updated atomically when shared across goroutines
}

// IntervalLogger is a special logger that provides a way to only log at a certain interval.
type IntervalLogger struct {
	logger      Logger // destination logger used when the interval elapses
	LogInterval int    // counter threshold that triggers a flush
	Counter     int    // running count since the last flush
	Message     string // most recent message; emitted when Counter passes LogInterval
}
// NewLogger creates a new logger. Logger writes using the LOG_LOCAL0 facility by default if system logging is enabled.
// tag is used as the syslog tag, systemLog toggles syslog output, and stdout toggles standard-output logging.
func NewLogger(tag string, systemLog bool, stdout bool) (logger *Logger, err error) {
	// Set up system logging, if enabled
	syslogger := &syslog.Writer{}
	if systemLog {
		syslogger, err = syslog.New(syslog.LOG_LOCAL0, tag)
		if err != nil {
			return &Logger{}, fmt.Errorf("ec2macossystemmonitor: unable to create new syslog logger: %s\n", err)
		}
	}
	// Set log to use microseconds, if stdout is enabled.
	// Note: this mutates the process-wide standard logger's flags.
	if stdout {
		log.SetFlags(log.LstdFlags | log.Lmicroseconds)
	}
	// NOTE(review): dereferencing syslogger stores the Writer by value (copying
	// its internal state); confirm the returned Logger is not copied afterwards.
	return &Logger{LogToSystemLog: systemLog, LogToStdout: stdout, Tag: tag, SystemLog: *syslogger}, nil
}
// Info writes info to stdout and/or the system log.
func (l *Logger) Info(v ...interface{}) {
	if l.LogToStdout {
		log.Print(v...)
	}
	if l.LogToSystemLog {
		l.SystemLog.Info(fmt.Sprint(v...))
	}
}

// Infof writes formatted info to stdout and/or the system log.
func (l *Logger) Infof(format string, v ...interface{}) {
	if l.LogToStdout {
		log.Printf(format, v...)
	}
	if l.LogToSystemLog {
		l.SystemLog.Info(fmt.Sprintf(format, v...))
	}
}

// Warn writes a warning to stdout and/or the system log.
func (l *Logger) Warn(v ...interface{}) {
	if l.LogToStdout {
		log.Print(v...)
	}
	if l.LogToSystemLog {
		l.SystemLog.Warning(fmt.Sprint(v...))
	}
}

// Warnf writes a formatted warning to stdout and/or the system log.
func (l *Logger) Warnf(format string, v ...interface{}) {
	if l.LogToStdout {
		log.Printf(format, v...)
	}
	if l.LogToSystemLog {
		l.SystemLog.Warning(fmt.Sprintf(format, v...))
	}
}

// Error writes an error to stdout and/or the system log.
func (l *Logger) Error(v ...interface{}) {
	if l.LogToStdout {
		log.Print(v...)
	}
	if l.LogToSystemLog {
		l.SystemLog.Err(fmt.Sprint(v...))
	}
}

// Errorf writes a formatted error to stdout and/or the system log.
func (l *Logger) Errorf(format string, v ...interface{}) {
	if l.LogToStdout {
		log.Printf(format, v...)
	}
	if l.LogToSystemLog {
		l.SystemLog.Err(fmt.Sprintf(format, v...))
	}
}

// Fatal writes an error to stdout and/or the system log then exits 1.
// Note: os.Exit bypasses deferred functions.
func (l *Logger) Fatal(v ...interface{}) {
	l.Error(v...)
	os.Exit(1)
}

// Fatalf writes a formatted error to stdout and/or the system log then exits 1.
// Note: os.Exit bypasses deferred functions.
func (l *Logger) Fatalf(format string, v ...interface{}) {
	l.Errorf(format, v...)
	os.Exit(1)
}
// PushToInterval adds i to the running counter and records message as the most
// recent message; care should be taken to retrieve the previous Message before
// calling, since it is overwritten. When the accumulated counter exceeds
// LogInterval, the message is logged, the counter resets, and true is returned
// to indicate a flush happened.
func (t *IntervalLogger) PushToInterval(i int, message string) (flushed bool) {
	// Bug fix: this previously read "t.Counter = +i" (assignment of +i), which
	// reset the counter on every call instead of accumulating toward the
	// interval threshold.
	t.Counter += i
	t.Message = message
	if t.Counter > t.LogInterval {
		t.logger.Info(message)
		t.Counter = 0
		return true
	}
	return false
}
| 137 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"bytes"
"compress/zlib"
"encoding/base64"
"encoding/json"
"fmt"
"hash/adler32"
"net"
"os"
"sync/atomic"
"time"
)
const SocketTimeout = 5 * time.Second
// BuildMessage takes a tag along with data for the tag and builds a byte slice to be sent to the relay.
//
// The tag is used as a way to namespace various payloads that are supported. Data is the payload and its format is
// specific to each tag. Each payload has the option to be compressed and this flag is part of the envelope created for
// sending data. The slice of bytes is passed back to the caller to allow flexibility to log the bytes if desired before
// passing to the relay via PassToRelayd.
func BuildMessage(tag string, data string, compress bool) ([]byte, error) {
	var payload SerialPayload
	// This determines if the data will be passed in as provided or zlib compressed and then base64 encoded.
	// Some payloads will exceed the limit of what can be sent on the serial device, so compression allows more data
	// to be sent. base64 encoding allows safe characters only to be passed on the device.
	if compress {
		var b bytes.Buffer
		// Level 9 = maximum compression, keeping serial payloads as small as possible.
		w, err := zlib.NewWriterLevel(&b, 9)
		if err != nil {
			return []byte{}, fmt.Errorf("ec2macossystemmonitor: couldn't get compression writer: %s", err)
		}
		_, err = w.Write([]byte(data))
		if err != nil {
			return []byte{}, fmt.Errorf("ec2macossystemmonitor: couldn't copy compressed data: %s", err)
		}
		// Close flushes the compressor; b is not complete until this succeeds.
		err = w.Close()
		if err != nil {
			return []byte{}, fmt.Errorf("ec2macossystemmonitor: couldn't close compressor: %s", err)
		}
		encodedData := base64.StdEncoding.EncodeToString(b.Bytes())
		payload = SerialPayload{
			Tag:      tag,
			Compress: compress,
			Data:     encodedData,
		}
	} else {
		// No compression needed, simply create the SerialPayload
		payload = SerialPayload{
			Tag:      tag,
			Compress: compress,
			Data:     data,
		}
	}
	// Once the payload is created, it's converted to json
	payloadBytes, err := json.Marshal(payload)
	if err != nil {
		// Bug fix: the error was previously formatted into the message text itself
		// ("couldn't get <err> into json"); report it as the cause instead.
		return []byte{}, fmt.Errorf("ec2macossystemmonitor: couldn't marshal payload into json: %s", err)
	}
	// A checksum is computed on the json payload for the serial message
	checkSum := adler32.Checksum(payloadBytes)
	message := SerialMessage{
		Csum:    checkSum,
		Payload: string(payloadBytes),
	}
	// Once the message is created, it's converted to json
	messageBytes, err := json.Marshal(message)
	if err != nil {
		// Bug fix: same malformed error construction as above.
		return []byte{}, fmt.Errorf("ec2macossystemmonitor: couldn't marshal message into json: %s", err)
	}
	// Messages are newline-delimited on the wire.
	messageBytes = append(messageBytes, "\n"...)
	return messageBytes, nil
}
// PassToRelayd takes a byte slice and writes it to a UNIX socket to send for relaying.
// It returns the number of bytes handed off, or an error when relayd's socket
// is missing, unreachable, or the write fails.
func PassToRelayd(messageBytes []byte) (n int, err error) {
	// The socket file needs to be created to write; the server creates this file.
	if !fileExists(SocketPath) {
		return 0, fmt.Errorf("ec2macossystemmonitor: %s does not exist, cannot send message: %s", SocketPath, string(messageBytes))
	}
	// Finally write the serial message to the domain socket
	sock, err := net.Dial("unix", SocketPath)
	if err != nil {
		// Bug fix: the package prefix here was misspelled "cec2macossystemmonitor".
		return 0, fmt.Errorf("ec2macossystemmonitor: could not connect to %s: %s", SocketPath, err)
	}
	defer sock.Close()
	_, err = sock.Write(messageBytes)
	if err != nil {
		return 0, fmt.Errorf("ec2macossystemmonitor: error while writing to socket: %s", err)
	}
	// Return the length of the bytes written to the socket
	return len(messageBytes), nil
}
// SendMessage builds a relay message from tag/data and writes it to the relay
// socket. It is a convenience wrapper that calls BuildMessage then
// PassToRelayd, returning the number of bytes written.
func SendMessage(tag string, data string, compress bool) (n int, err error) {
	msg, buildErr := BuildMessage(tag, data, compress)
	if buildErr != nil {
		return 0, fmt.Errorf("ec2macossystemmonitor: error while building message bytes: %s", buildErr)
	}
	return PassToRelayd(msg)
}
// SerialRelay contains the serial connection and UNIX domain socket listener as well as the channel that communicates
// that the resources can be closed.
type SerialRelay struct {
	serialConnection SerialConnection // serialConnection is the managed serial device connection for writing
	listener         net.UnixListener // listener is the UNIX domain socket listener for reading
	ReadyToClose     chan bool        // ReadyToClose is the channel for communicating the need to close connections
}
// NewRelay creates an instance of the relay server and returns a SerialRelay for manual closing.
//
// The SerialRelay returned from NewRelay is designed to be used in a go routine by using StartRelay. This allows the
// caller to handle OS Signals and other events for clean shutdown rather than relying upon defer calls.
func NewRelay(serialDevice string) (relay SerialRelay, err error) {
	// Create a serial connection
	serCon, err := NewSerialConnection(serialDevice)
	if err != nil {
		return SerialRelay{}, fmt.Errorf("relayd: failed to build a connection to serial interface: %s", err)
	}
	// Clean the socket in case it's stale from a previous run
	if err = os.RemoveAll(SocketPath); err != nil {
		if _, ok := err.(*os.PathError); ok {
			// Help guide that the SocketPath is invalid
			return SerialRelay{}, fmt.Errorf("relayd: unable to clean %s: %s", SocketPath, err)
		} else {
			// Unknown issue, return the error directly
			return SerialRelay{}, err
		}
	}
	// Create a listener on the socket by getting the address and then creating a Unix Listener
	addr, err := net.ResolveUnixAddr("unix", SocketPath)
	if err != nil {
		return SerialRelay{}, fmt.Errorf("relayd: unable to resolve address: %s", err)
	}
	listener, err := net.ListenUnix("unix", addr)
	if err != nil {
		return SerialRelay{}, fmt.Errorf("relayd: unable to listen on socket: %s", err)
	}
	// Create the SerialRelay to return
	relay.listener = *listener
	relay.serialConnection = *serCon
	// Create the channel for sending an exit; unbuffered, so the sender blocks
	// until StartRelay observes the shutdown request
	relay.ReadyToClose = make(chan bool)
	return relay, nil
}
// StartRelay takes the connections for the serial relay and begins listening.
//
// This is a server implementation of the SerialRelay so it logs to a provided logger; an empty logger can be provided
// to stop logging if desired. This function is designed to be used in a go routine so logging may be the only way to
// get data about behavior while it is running. The resources can be shut down by sending true to the ReadyToClose
// channel. This invokes CleanUp() which is exported in case the caller desires to call it instead.
func (relay *SerialRelay) StartRelay(logger *Logger, relayStatus *StatusLogBuffer) {
	for {
		// Set a deadline so Accept cannot block forever: the shutdown channel
		// gets polled at least once per SocketTimeout.
		err := relay.listener.SetDeadline(time.Now().Add(SocketTimeout))
		if err != nil {
			logger.Fatal("Unable to set deadline on socket:", err)
		}
		socCon, err := relay.listener.Accept()
		// Look for signal to exit, otherwise keep going, check the error only if we aren't supposed to shutdown
		select {
		case <-relay.ReadyToClose:
			logger.Info("[relayd] requested to shutdown")
			// NOTE(review): a connection accepted in this same iteration is
			// abandoned unserved (and not closed) here — confirm that is
			// acceptable during shutdown.
			// Clean up resources manually
			relay.CleanUp()
			// Return to stop the connections from continuing
			return
		default:
			// If ReadyToClose has not been sent, then check for errors, handle timeouts, otherwise process
			if err != nil {
				if er, ok := err.(net.Error); ok && er.Timeout() {
					// This is just a timeout; go back to the top to start listening again
					continue
				} else {
					// This is some other error; for Accept(), it's a fatal error if we can't Accept()
					logger.Fatal("Unable to start accepting on socket:", err)
				}
			}
		}
		// Write the data to the relay
		written, err := relay.serialConnection.RelayData(socCon)
		if err != nil {
			logger.Errorf("Failed to send data: %s\n", err)
		}
		// Increment the counter atomically; relayStatus is read by the goroutine that launched this relay
		atomic.AddInt64(&relayStatus.Written, int64(written))
	}
}
// CleanUp manually closes the connections for a Serial Relay. This is called
// from StartRelay when true is sent on ReadyToClose so it should only be
// called separately if closing outside of that mechanism.
func (r *SerialRelay) CleanUp() {
	// Best-effort teardown: close both endpoints and remove the socket file,
	// discarding errors since shutdown is already underway.
	_ = r.listener.Close()
	_ = r.serialConnection.Close()
	_ = os.RemoveAll(SocketPath)
}
| 213 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"reflect"
"testing"
)
// TestBuildMessage creates some basic tests to ensure the options result in the correct bytes
func TestBuildMessage(t *testing.T) {
	// emptyTestBytes is the byte slice of a payload tag "test" and empty payload
	emptyTestBytes := []byte{123, 34, 99, 115, 117, 109, 34, 58, 53, 48, 55, 53, 55, 57, 55, 52, 52, 44, 34, 112, 97, 121, 108, 111, 97, 100, 34, 58, 34, 123, 92, 34, 116, 97, 103, 92, 34, 58, 92, 34, 116, 101, 115, 116, 92, 34, 44, 92, 34, 99, 111, 109, 112, 114, 101, 115, 115, 92, 34, 58, 102, 97, 108, 115, 101, 44, 92, 34, 100, 97, 116, 97, 92, 34, 58, 92, 34, 92, 34, 125, 34, 125, 10}
	// emptyCompressedTestBytes is the byte slice of a payload tag "test" and empty payload while compressing ""
	emptyCompressedTestBytes := []byte{123, 34, 99, 115, 117, 109, 34, 58, 49, 54, 57, 53, 52, 54, 49, 51, 56, 44, 34, 112, 97, 121, 108, 111, 97, 100, 34, 58, 34, 123, 92, 34, 116, 97, 103, 92, 34, 58, 92, 34, 116, 101, 115, 116, 92, 34, 44, 92, 34, 99, 111, 109, 112, 114, 101, 115, 115, 92, 34, 58, 116, 114, 117, 101, 44, 92, 34, 100, 97, 116, 97, 92, 34, 58, 92, 34, 101, 78, 111, 66, 65, 65, 68, 47, 47, 119, 65, 65, 65, 65, 69, 61, 92, 34, 125, 34, 125, 10}
	// basicCPUTestBytes is the byte slice of a payload tag "cpuutil" carrying the uncompressed string "2.0"
	basicCPUTestBytes := []byte{123, 34, 99, 115, 117, 109, 34, 58, 50, 49, 49, 56, 49, 57, 50, 57, 53, 48, 44, 34, 112, 97, 121, 108, 111, 97, 100, 34, 58, 34, 123, 92, 34, 116, 97, 103, 92, 34, 58, 92, 34, 99, 112, 117, 117, 116, 105, 108, 92, 34, 44, 92, 34, 99, 111, 109, 112, 114, 101, 115, 115, 92, 34, 58, 102, 97, 108, 115, 101, 44, 92, 34, 100, 97, 116, 97, 92, 34, 58, 92, 34, 50, 46, 48, 92, 34, 125, 34, 125, 10}
	type args struct {
		tag      string
		data     string
		compress bool
	}
	tests := []struct {
		name    string
		args    args
		want    []byte
		wantErr bool
	}{
		{"Empty Message", args{"test", "", false}, emptyTestBytes, false},
		{"Empty Message Compressed", args{"test", "", true}, emptyCompressedTestBytes, false},
		{"Basic CPU Test", args{"cpuutil", "2.0", false}, basicCPUTestBytes, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := BuildMessage(tt.args.tag, tt.args.data, tt.args.compress)
			if (err != nil) != tt.wantErr {
				t.Errorf("BuildMessage() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Exact byte comparison pins both the envelope json and the checksum.
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("BuildMessage() got = %v, want %v", got, tt.want)
			}
		})
	}
}
| 44 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"bytes"
"fmt"
"io"
"net"
"go.bug.st/serial"
)
// SocketPath is the default socket for relayd.
const SocketPath = "/tmp/.ec2monitoring.sock"

// SerialConnection is the container for passing the ReadWriteCloser for serial connections.
type SerialConnection struct {
	port serial.Port // open serial port used for all relay writes
}

// SerialPayload is the container for a payload that is written to serial device.
type SerialPayload struct {
	// Tag is the namespace that separates different types of data on the device
	Tag string `json:"tag"`
	// Compress determines if the data is compressed and base64 encoded
	Compress bool `json:"compress"`
	// Data is the actual data payload to be consumed
	Data string `json:"data"`
}

// SerialMessage is the container to actually send on the serial connection, contains checksum of SerialPayload to
// provide additional assurance the entire payload has been written.
type SerialMessage struct {
	// Csum is the checksum used to ensure all data was received (adler32 over the payload json; see BuildMessage)
	Csum uint32 `json:"csum"`
	// Payload is the SerialPayload in json format
	Payload string `json:"payload"`
}
// CheckSocketExists is a helper function to quickly check for the server.
func CheckSocketExists() (exists bool) {
	// The relay server creates the socket file, so its presence is a cheap
	// proxy for "relayd is (or was) running".
	exists = fileExists(SocketPath)
	return exists
}
// NewSerialConnection creates a serial device connection and returns a reference to the connection.
func NewSerialConnection(device string) (conn *SerialConnection, err error) {
	// Set up options for serial device, take defaults for now on everything else
	mode := &serial.Mode{
		BaudRate: 115200,
	}
	// Attempt to avoid opening a non-existent serial connection.
	// NOTE(review): this existence check races with Open (TOCTOU); it only
	// improves the error message — the Open call below still handles failure.
	if !fileExists(device) {
		return nil, fmt.Errorf("ec2macossystemmonitor: serial device does not exist: %s", device)
	}
	// Open the serial port
	port, err := serial.Open(device, mode)
	if err != nil {
		return nil, fmt.Errorf("ec2macossystemmonitor: unable to get serial connection: %s", err)
	}
	// Put the port in a SerialConnection for handing it off
	s := SerialConnection{port}
	return &s, nil
}
// Close is simply a pass through to close the device so it remains open in the
// scope needed. Any error from the underlying port close is returned as-is.
func (s *SerialConnection) Close() (err error) {
	// Idiom fix: the previous if-err-return-err/return-nil dance is exactly
	// equivalent to returning the close result directly.
	return s.port.Close()
}
// RelayData is the primary function for reading data from the socket provided and writing to the serial connection.
// It returns the number of bytes written to the serial device; the socket is always closed before returning.
func (s *SerialConnection) RelayData(sock net.Conn) (n int, err error) {
	defer sock.Close()
	// Create a buffer for reading in from the socket, probably want to bound this
	// (io.Copy reads until the client closes, so a misbehaving client could grow it without limit)
	var buf bytes.Buffer
	// Read in the socket data into the buffer
	_, err = io.Copy(&buf, sock)
	if err != nil {
		return 0, fmt.Errorf("ec2macossystemmonitor: failed to read socket to buffer: %s", err)
	}
	// Write out the buffer to the serial device
	written, err := s.port.Write(buf.Bytes())
	if err != nil {
		return 0, fmt.Errorf("ec2macossystemmonitor: failed to write buffer to serial: %s", err)
	}
	return written, nil
}
| 91 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"os"
)
// fileExists returns true if a file exists and is not a directory.
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
| 15 |
ec2-macos-system-monitor | aws | Go | package ec2macossystemmonitor
import (
"runtime"
"testing"
)
// Test_fileExists tests the fileExists helper function with basic coverage
func Test_fileExists(t *testing.T) {
_, testFile, _, _ := runtime.Caller(0)
type args struct {
filename string
}
tests := []struct {
name string
args args
want bool
}{
{"Not Real File", args{"notafile"}, false},
{"Known File", args{testFile}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := fileExists(tt.args.filename); got != tt.want {
t.Errorf("fileExists() = %v, want %v", got, tt.want)
}
})
}
} | 29 |
ec2-macos-utils | aws | Go | package main
import (
"context"
"fmt"
"os"
"github.com/aws/ec2-macos-utils/internal/cmd"
"github.com/aws/ec2-macos-utils/internal/contextual"
"github.com/aws/ec2-macos-utils/internal/system"
)
// main identifies the running system, stores the detected product in the
// command context, and executes the root command. Failures before the CLI is
// ready (scan error or missing product) panic; command execution errors only
// set a non-zero exit status here — presumably the command reports its own
// error output (confirm in cmd.MainCommand).
func main() {
	sys, err := system.Scan()
	if err != nil {
		panic(fmt.Errorf("cannot identify system: %w", err))
	}
	p := sys.Product()
	if p == nil {
		panic("no product associated with identified system")
	}
	// Make the detected product available to subcommands via the context.
	ctx := contextual.WithProduct(context.Background(), p)
	if err := cmd.MainCommand().ExecuteContext(ctx); err != nil {
		os.Exit(1)
	}
}
| 29 |
ec2-macos-utils | aws | Go | package build
const (
	// GitHubLink is the static HTTPS URL for EC2 macOS Utils public GitHub repository.
	GitHubLink = "https://github.com/aws/ec2-macos-utils"
)

var (
	// CommitDate is the date of the latest commit in the repository. This variable gets set at build-time
	// (presumably via -ldflags "-X"; empty when built without it — confirm in the build scripts).
	CommitDate string

	// Version is the latest version of the utility. This variable gets set at build-time
	// (presumably via -ldflags "-X"; empty when built without it — confirm in the build scripts).
	Version string
)
| 15 |
ec2-macos-utils | aws | Go | // Package tools holds references to build time tools and is not intended to be
// imported for use.
package tools
| 4 |
ec2-macos-utils | aws | Go | // +build tools
package tools
// goimports: golang.org/x/tools/cmd/goimports
import _ "golang.org/x/tools/cmd/goimports"
| 7 |
ec2-macos-utils | aws | Go | package cmd
import (
"errors"
"fmt"
"strings"
"github.com/dustin/go-humanize"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/aws/ec2-macos-utils/internal/contextual"
"github.com/aws/ec2-macos-utils/internal/diskutil"
"github.com/aws/ec2-macos-utils/internal/diskutil/identifier"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
)
// growContainer is a struct for holding all information passed into the grow container command.
type growContainer struct {
	// dryrun indicates that mutating diskutil operations should be skipped
	// (the utility is wrapped with diskutil.Dryrun).
	dryrun bool
	// id is the container's device identifier (e.g. disk1 or /dev/disk1),
	// or the literal string "root" for the OS root volume.
	id string
}
// growContainerCommand creates a new command which grows APFS containers to their maximum size.
// The returned command requires the --id flag and root privileges to run.
func growContainerCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "grow",
		Short: "resize container to max size",
		Long: strings.TrimSpace(`
grow resizes the container to its maximum size using
'diskutil'. The container to operate on can be specified
with its identifier (e.g. disk1 or /dev/disk1). The string
'root' may be provided to resize the OS's root volume.
`),
	}

	// Set up the flags to be passed into the command
	growArgs := growContainer{}
	cmd.PersistentFlags().StringVar(&growArgs.id, "id", "", `container identifier to be resized or "root"`)
	cmd.PersistentFlags().BoolVar(&growArgs.dryrun, "dry-run", false, "run command without mutating changes")

	// MarkPersistentFlagRequired can only fail when the named flag does not
	// exist; that is a programming error, so surface it instead of silently
	// discarding the returned error.
	if err := cmd.MarkPersistentFlagRequired("id"); err != nil {
		panic(err)
	}

	// Set up the command's pre-run to check for root permissions.
	// This is necessary since diskutil repairDisk requires root permissions to run.
	cmd.PreRunE = assertRootPrivileges

	// Set up the command's run function
	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		// The product is attached to the context by main; diskutil behavior
		// is selected per macOS release, so it is required here.
		product := contextual.Product(ctx)
		if product == nil {
			return errors.New("product required in context")
		}

		logrus.WithField("product", product).Info("Configuring diskutil for product")
		d, err := diskutil.ForProduct(product)
		if err != nil {
			return err
		}

		// In dry-run mode, wrap the utility so mutating calls are skipped.
		if growArgs.dryrun {
			d = diskutil.Dryrun(d)
		}

		logrus.WithField("args", growArgs).Debug("Running grow command with args")
		return run(d, growArgs)
	}

	return cmd
}
// run attempts to grow the disk for the specified device identifier to its maximum size using diskutil.GrowContainer.
// It logs progress along the way and returns an error describing the first step that failed.
func run(utility diskutil.DiskUtil, args growContainer) error {
	di, err := getTargetDiskInfo(utility, args.id)
	if err != nil {
		return fmt.Errorf("cannot grow container: %w", err)
	}

	logrus.WithField("device_id", di.DeviceIdentifier).Info("Attempting to grow container...")
	if err := diskutil.GrowContainer(utility, di); err != nil {
		return err
	}

	// Re-fetch the disk information so the success message can report the
	// post-resize size.
	logrus.WithField("device_id", di.ParentWholeDisk).Info("Fetching updated information for device...")
	updatedDi, err := getTargetDiskInfo(utility, di.ParentWholeDisk)
	if err != nil {
		// Wrap with context rather than logging and returning the same
		// error (handle each error at exactly one layer).
		return fmt.Errorf("cannot fetch updated disk information: %w", err)
	}

	logrus.WithFields(logrus.Fields{
		"device_id":  di.DeviceIdentifier,
		"total_size": humanize.Bytes(updatedDi.TotalSize),
	}).Info("Successfully grew device to maximum size")
	return nil
}
// getTargetDiskInfo retrieves the disk info for the specified target identifier. The special
// identifier "root" (case-insensitive) is shorthand for the OS root volume "/". Any other
// identifier is validated against the system's partition listing before its info is fetched.
func getTargetDiskInfo(du diskutil.DiskUtil, target string) (*types.DiskInfo, error) {
	// Shortcut: "root" maps directly to the root volume.
	if strings.EqualFold("root", target) {
		return du.Info("/")
	}

	partitions, err := du.List(nil)
	if err != nil {
		return nil, fmt.Errorf("cannot list partitions: %w", err)
	}

	if err = validateDeviceID(target, partitions); err != nil {
		return nil, fmt.Errorf("invalid target: %w", err)
	}

	return du.Info(target)
}
// validateDeviceID verifies if the provided ID is a valid device identifier or device node.
//
// An error is returned when the ID is blank, does not parse as a device identifier
// (via identifier.ParseDiskID), or is not present in the system's partition listing.
func validateDeviceID(id string, partitions *types.SystemPartitions) error {
	// Check if ID is provided
	if strings.TrimSpace(id) == "" {
		return errors.New("empty device id")
	}

	// Get the device identifier
	deviceID := identifier.ParseDiskID(id)
	if deviceID == "" {
		return errors.New("id does not match the expected device identifier format")
	}

	// Guard against a nil listing so a bad call fails with a clear error
	// instead of a nil-pointer panic.
	if partitions == nil {
		return errors.New("no partition information")
	}

	// Check the device directory for the given identifier
	for _, name := range partitions.AllDisks {
		if strings.EqualFold(name, deviceID) {
			return nil
		}
	}

	return errors.New("invalid device identifier")
}
| 144 |
ec2-macos-utils | aws | Go | package cmd
import (
"fmt"
"io/ioutil"
"testing"
mock_diskutil "github.com/aws/ec2-macos-utils/internal/diskutil/mocks"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"github.com/golang/mock/gomock"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
// init silences logrus for the whole test binary so the expected error paths
// exercised below don't clutter test output.
func init() {
	logrus.SetOutput(ioutil.Discard)
}
// TestRun_WithInfoErr verifies run surfaces an error when fetching the root
// volume's disk information fails.
func TestRun_WithInfoErr(t *testing.T) {
	const (
		testDiskID    = "root"
		testDiskAlias = "/"
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	mock.EXPECT().Info(testDiskAlias).Return(nil, fmt.Errorf("error"))

	err := run(mock, growContainer{id: testDiskID})

	assert.Error(t, err, `should fail to get disk information for /`)
}
// TestRun_WithoutDiskInfo resolves "root" to "/" but returns a DiskInfo with
// no container or physical-store data, so growing the container fails.
func TestRun_WithoutDiskInfo(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Sparse DiskInfo: only the identifier is populated.
	sparse := types.DiskInfo{DeviceIdentifier: "root"}

	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	mock.EXPECT().Info("/").Return(&sparse, nil)

	err := run(mock, growContainer{id: "root"})

	assert.Error(t, err, "should fail to grow the container since the DiskInfo isn't populated")
}
// TestRun_WithUpdatedInfoErr drives a complete grow sequence and then fails
// the final List call, so run cannot fetch the post-resize disk information
// and must return that error.
func TestRun_WithUpdatedInfoErr(t *testing.T) {
	const (
		testDiskID = "disk1"
		diskSize uint64 = 3_000_000
		partSize uint64 = 500_000
	)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Partition listing fixture: one whole disk holding two partitions.
	parts := types.SystemPartitions{
		AllDisks: []string{testDiskID},
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size: diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}
	// Disk fixture: a physical APFS disk that is its own parent whole disk.
	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		DeviceIdentifier: testDiskID,
		ParentWholeDisk: testDiskID,
		VirtualOrPhysical: "Physical",
	}
	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	// InOrder pins the exact diskutil call sequence the grow path makes;
	// the trailing List error aborts the post-resize info refresh.
	gomock.InOrder(
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().Info(testDiskID).Return(&disk, nil),
		mock.EXPECT().RepairDisk(testDiskID).Return("", nil),
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().ResizeContainer(testDiskID, "0").Return("", nil),
		mock.EXPECT().List(nil).Return(nil, fmt.Errorf("error")),
	)
	err := run(mock, growContainer{
		id: testDiskID,
	})
	assert.Error(t, err, "should fail to get updated DiskInfo due to list error")
}
// TestRun_Success drives the complete, successful grow sequence end-to-end
// against the mock, including the post-resize info refresh.
func TestRun_Success(t *testing.T) {
	const (
		testDiskID = "disk1"
		diskSize uint64 = 3_000_000
		partSize uint64 = 500_000
	)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Partition listing fixture: one whole disk holding two partitions.
	parts := types.SystemPartitions{
		AllDisks: []string{testDiskID},
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size: diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}
	// Disk fixture: a physical APFS disk; TotalSize feeds the success log.
	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		DeviceIdentifier: testDiskID,
		ParentWholeDisk: testDiskID,
		TotalSize: diskSize,
		VirtualOrPhysical: "Physical",
	}
	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	// InOrder pins the exact diskutil call sequence of a successful grow,
	// ending with the List/Info pair that refreshes the disk information.
	gomock.InOrder(
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().Info(testDiskID).Return(&disk, nil),
		mock.EXPECT().RepairDisk(testDiskID).Return("", nil),
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().ResizeContainer(testDiskID, "0").Return("", nil),
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().Info(testDiskID).Return(&disk, nil),
	)
	err := run(mock, growContainer{
		id: testDiskID,
	})
	assert.NoError(t, err, "should be able to grow container with valid data")
}
// TestGetTargetDiskInfo_WithRootInfoErr verifies that the "root" alias maps
// to "/" and that a failure from Info is propagated to the caller.
func TestGetTargetDiskInfo_WithRootInfoErr(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	mock.EXPECT().Info("/").Return(nil, fmt.Errorf("error"))

	di, err := getTargetDiskInfo(mock, "root")

	assert.Error(t, err, `should fail to get DiskInfo for /`)
	assert.Nil(t, di)
}
// TestGetTargetDiskInfo_WithListErr verifies a partition-listing failure is
// propagated before any Info call is made.
func TestGetTargetDiskInfo_WithListErr(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	mock.EXPECT().List(nil).Return(nil, fmt.Errorf("error"))

	di, err := getTargetDiskInfo(mock, "disk1")

	assert.Error(t, err, "should fail to get partition information")
	assert.Nil(t, di)
}
// TestGetTargetDiskInfo_NoTargetDisk verifies that an identifier absent from
// the partition listing is rejected without ever calling Info.
func TestGetTargetDiskInfo_NoTargetDisk(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// The listing only knows about disk0; the test asks for disk1.
	listing := types.SystemPartitions{
		AllDisks: []string{"disk0"},
	}

	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	mock.EXPECT().List(nil).Return(&listing, nil)

	di, err := getTargetDiskInfo(mock, "disk1")

	assert.Error(t, err, "should fail to find targetDiskID in partition information")
	assert.Nil(t, di, "should get nil data for invalid target disk")
}
// TestGetTargetDiskInfo_WithInfoErr lists a known disk but fails the Info
// call, verifying the error is propagated and no disk info is returned.
func TestGetTargetDiskInfo_WithInfoErr(t *testing.T) {
	const testDiskID = "disk1"
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	parts := types.SystemPartitions{
		AllDisks: []string{testDiskID},
	}
	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	// List must succeed (validation passes) before Info fails.
	gomock.InOrder(
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().Info(testDiskID).Return(nil, fmt.Errorf("error")),
	)
	di, err := getTargetDiskInfo(mock, testDiskID)
	assert.Error(t, err, "should fail to get disk information")
	assert.Nil(t, di, "should get nil data with info error")
}
// TestGetTargetDiskInfo_Success verifies the happy path: the target is found
// in the listing and the Info result is returned unchanged.
func TestGetTargetDiskInfo_Success(t *testing.T) {
	const testDiskID = "disk1"
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	parts := types.SystemPartitions{
		AllDisks: []string{testDiskID},
	}
	expectedDisk := &types.DiskInfo{}
	mock := mock_diskutil.NewMockDiskUtil(ctrl)
	// Validation (List) precedes the Info fetch.
	gomock.InOrder(
		mock.EXPECT().List(nil).Return(&parts, nil),
		mock.EXPECT().Info(testDiskID).Return(expectedDisk, nil),
	)
	actualDisk, err := getTargetDiskInfo(mock, testDiskID)
	assert.NoError(t, err, "should be able succeeded with valid info")
	assert.Equal(t, expectedDisk, actualDisk, "should be able to get expected data from info")
}
// TestValidateDeviceID covers validateDeviceID's validation table: blank IDs,
// malformed identifiers, identifiers missing from the listing, and a valid
// identifier. (The previously declared wantValid field was never read by the
// test body and has been removed.)
func TestValidateDeviceID(t *testing.T) {
	type args struct {
		id         string
		partitions *types.SystemPartitions
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "without ID",
			args: args{
				id:         "",
				partitions: nil,
			},
			wantErr: true,
		},
		{
			name: "invalid ID prefix",
			args: args{
				id:         "bad",
				partitions: nil,
			},
			wantErr: true,
		},
		{
			name: "without disk number",
			args: args{
				id:         "disk",
				partitions: nil,
			},
			wantErr: true,
		},
		{
			name: "no target disk",
			args: args{
				id: "disk2",
				partitions: &types.SystemPartitions{
					AllDisks: []string{"disk0", "disk1"},
				},
			},
			wantErr: true,
		},
		{
			name: "success",
			args: args{
				id: "disk0",
				partitions: &types.SystemPartitions{
					AllDisks: []string{"disk0", "disk1"},
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateDeviceID(tt.args.id, tt.args.partitions)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
| 336 |
ec2-macos-utils | aws | Go | // Package cmd provides the functionality necessary for CLI commands in EC2 macOS Utils.
package cmd
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/aws/ec2-macos-utils/internal/build"
)
const shortLicenseText = "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved."
// MainCommand provides the main program entrypoint that dispatches to utility subcommands.
func MainCommand() *cobra.Command {
	root := rootCommand()

	// Register each utility subcommand on the root; new utilities are added here.
	root.AddCommand(growContainerCommand())

	return root
}
// rootCommand builds a root command object for program run.
// It owns the global --verbose flag and configures logging before any
// subcommand executes.
func rootCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "ec2-macos-utils",
		Short: "utilities for EC2 macOS instances",
		Long: strings.TrimSpace(`
This command provides utilities for common tasks on EC2 macOS instances to simplify operation & administration.
This includes disk manipulation and system configuration helpers. Tasks are reached through subcommands, each with
help text and usages that accompany them.
`),
		Version:           build.Version,
		SilenceUsage:      true,
		DisableAutoGenTag: true,
	}

	// Show the commit date and license notice alongside the version string.
	cmd.SetVersionTemplate(fmt.Sprintf("{{.Name}} {{.Version}} [%s]\n\n%s\n", build.CommitDate, shortLicenseText))

	// The verbose flag raises the log level for the whole invocation.
	var verbose bool
	cmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging output")
	cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
		if verbose {
			setupLogging(logrus.DebugLevel)
		} else {
			setupLogging(logrus.InfoLevel)
		}
		return nil
	}

	return cmd
}
// setupLogging configures logrus to use the desired timestamp format and log level.
// (The local was renamed from the exported-style "Formatter" to follow Go's
// lowerCamel convention for locals.)
func setupLogging(level logrus.Level) {
	// Full, human-readable timestamp on every entry.
	formatter := &logrus.TextFormatter{
		TimestampFormat: time.RFC822,
		FullTimestamp:   true,
	}

	logrus.SetLevel(level)
	logrus.SetFormatter(formatter)
}
func hasRootPrivileges() bool {
return os.Geteuid() == 0
}
// assertRootPrivileges checks if the command is running with root permissions.
// If the command doesn't have root permissions, a warning is logged and an
// error telling the user to re-run with sudo is returned.
func assertRootPrivileges(cmd *cobra.Command, args []string) error {
	logrus.Debug("Checking user permissions...")
	if hasRootPrivileges() {
		return nil
	}

	logrus.Warn("Root privileges required")
	return errors.New("root privileges required, re-run command with sudo")
}
| 99 |
ec2-macos-utils | aws | Go | package main
import (
"os"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra/doc"
"github.com/aws/ec2-macos-utils/internal/cmd"
)
// main generates markdown documentation for every command in the CLI tree.
// The output directory defaults to ./docs and may be overridden by the first
// command-line argument.
func main() {
	outdir := "./docs"
	if len(os.Args) >= 2 {
		outdir = os.Args[1]
	}

	logrus.WithField("outdir", outdir).Info("generating docs")

	if err := os.MkdirAll(outdir, 0755); err != nil {
		panic(err)
	}
	if err := doc.GenMarkdownTree(cmd.MainCommand(), outdir); err != nil {
		panic(err)
	}

	logrus.WithField("outdir", outdir).Info("generated docs")
}
| 31 |
ec2-macos-utils | aws | Go | package contextual
import (
"context"
"github.com/aws/ec2-macos-utils/internal/system"
)
// productCtxKey is an unexported key type for context values set by this
// package. Using a named type (rather than an anonymous struct{} value, whose
// type is identical to any other package's anonymous empty struct) guarantees
// keys from other packages can never collide with ours.
type productCtxKey struct{}

// productKey is used to set and retrieve context held values for Product.
var productKey = productCtxKey{}
// WithProduct extends the context to provide a Product.
// The stored value is retrieved with Product; a nil product is stored as-is.
func WithProduct(ctx context.Context, product *system.Product) context.Context {
	return context.WithValue(ctx, productKey, product)
}
// Product fetches the system's Product provided in ctx.
//
// It returns nil when no product was attached. It panics when the stored
// value is not a *system.Product, since that indicates a corrupted context.
func Product(ctx context.Context) *system.Product {
	val := ctx.Value(productKey)
	if val == nil {
		return nil
	}

	product, ok := val.(*system.Product)
	if !ok {
		panic("incoherent context")
	}
	return product
}
| 28 |
ec2-macos-utils | aws | Go | package diskutil
import (
"fmt"
"io"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"howett.net/plist"
)
// Decoder outlines the functionality necessary for decoding plist output from the macOS diskutil command.
// Both methods return an error when the reader's contents are not valid plist data.
type Decoder interface {
	// DecodeSystemPartitions takes an io.ReadSeeker for the raw plist data of all disks and partition information
	// and decodes it into a new types.SystemPartitions struct.
	DecodeSystemPartitions(reader io.ReadSeeker) (*types.SystemPartitions, error)

	// DecodeDiskInfo takes an io.ReadSeeker for the raw plist data of disk information and decodes it into
	// a new types.DiskInfo struct.
	DecodeDiskInfo(reader io.ReadSeeker) (*types.DiskInfo, error)
}
// PlistDecoder provides the plist Decoder implementation.
// It is stateless; its zero value is ready to use.
type PlistDecoder struct{}
// DecodeSystemPartitions assumes the io.ReadSeeker it's given contains raw plist data and attempts to decode that.
func (d *PlistDecoder) DecodeSystemPartitions(reader io.ReadSeeker) (*types.SystemPartitions, error) {
	partitions := new(types.SystemPartitions)

	// Decode the plist output from diskutil into a SystemPartitions struct for easier access.
	if err := plist.NewDecoder(reader).Decode(partitions); err != nil {
		return nil, fmt.Errorf("error decoding list: %w", err)
	}

	return partitions, nil
}
// DecodeDiskInfo assumes the io.ReadSeeker it's given contains raw plist data and attempts to decode that.
func (d *PlistDecoder) DecodeDiskInfo(reader io.ReadSeeker) (*types.DiskInfo, error) {
	disk := new(types.DiskInfo)

	// Decode the plist output from diskutil into a DiskInfo struct for easier access.
	if err := plist.NewDecoder(reader).Decode(disk); err != nil {
		return nil, fmt.Errorf("error decoding disk info: %w", err)
	}

	return disk, nil
}
| 55 |
ec2-macos-utils | aws | Go | package diskutil
import (
_ "embed"
"strings"
"testing"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"github.com/stretchr/testify/assert"
)
// Embedded plist fixtures; the "broken" variants are intentionally missing
// the plist header so decode failures can be exercised.
var (
	//go:embed testdata/decoder/broken_disk_info.plist
	// decoderBrokenDiskInfo contains a disk plist file that is missing the plist header.
	decoderBrokenDiskInfo string

	//go:embed testdata/decoder/disk_info.plist
	// decoderDiskInfo contains a disk plist file that is properly formatted (but is also sparse).
	decoderDiskInfo string

	//go:embed testdata/decoder/broken_container_info.plist
	// decoderBrokenContainerInfo contains a container plist file that is missing the plist header.
	decoderBrokenContainerInfo string

	//go:embed testdata/decoder/container_info.plist
	// decoderContainerInfo contains a container plist file that is properly formatted (but is also sparse).
	decoderContainerInfo string

	//go:embed testdata/decoder/broken_list.plist
	// decoderBrokenList contains a container plist file that is missing the plist header.
	decoderBrokenList string

	//go:embed testdata/decoder/list.plist
	// decoderList contains a container plist file that is properly formatted (but is also sparse).
	decoderList string
)
// TestPlistDecoder_DecodeDiskInfo_WithoutInput verifies decoding empty input
// succeeds and yields a zero-value DiskInfo.
func TestPlistDecoder_DecodeDiskInfo_WithoutInput(t *testing.T) {
	d := &PlistDecoder{}
	reader := strings.NewReader("")
	expectedDisk := &types.DiskInfo{}
	actualDisk, err := d.DecodeDiskInfo(reader)
	assert.NoError(t, err, "should be able to decode empty input")
	// assert.ObjectsAreEqualValues only returns a bool and never fails the
	// test; EqualValues actually reports a mismatch.
	assert.EqualValues(t, expectedDisk, actualDisk)
}
// TestPlistDecoder_DecodeDiskInfo_WithoutPlistInput verifies that plain text
// input fails to decode and yields no DiskInfo.
func TestPlistDecoder_DecodeDiskInfo_WithoutPlistInput(t *testing.T) {
	dec := &PlistDecoder{}

	actualDisk, err := dec.DecodeDiskInfo(strings.NewReader("this is not a plist"))

	assert.Error(t, err, "shouldn't be able to decode non-plist input")
	assert.Nil(t, actualDisk, "should get nil since decode failed")
}
// TestPlistDecoder_DecodeDiskInfo_WithBrokenDiskInfoPlist verifies that a disk
// plist missing its header fails to decode.
func TestPlistDecoder_DecodeDiskInfo_WithBrokenDiskInfoPlist(t *testing.T) {
	dec := &PlistDecoder{}

	actualDisk, err := dec.DecodeDiskInfo(strings.NewReader(decoderBrokenDiskInfo))

	assert.Error(t, err, "shouldn't be able to decode broken plist data")
	assert.Nil(t, actualDisk, "should get nil since decode failed")
}
// TestPlistDecoder_DecodeDiskInfo_DiskSuccess decodes a sparse but valid disk
// plist fixture and checks the populated fields.
func TestPlistDecoder_DecodeDiskInfo_DiskSuccess(t *testing.T) {
	const (
		testDiskID = "disk2"
		testPhysicalStoreID = "disk0s2"
		availableSpare int = 100
	)
	d := &PlistDecoder{}
	reader := strings.NewReader(decoderDiskInfo)
	expectedDisk := &types.DiskInfo{
		AESHardware: false,
		APFSContainerReference: testDiskID,
		APFSPhysicalStores: []types.APFSPhysicalStore{{DeviceIdentifier: testPhysicalStoreID}},
		SMARTDeviceSpecificKeysMayVaryNotGuaranteed: &types.SmartDeviceInfo{AvailableSpare: availableSpare},
	}
	actualDisk, err := d.DecodeDiskInfo(reader)
	assert.NoError(t, err, "should be able to decode valid disk plist data")
	// assert.ObjectsAreEqualValues only returns a bool and never fails the
	// test; EqualValues actually reports a mismatch.
	assert.EqualValues(t, expectedDisk, actualDisk)
}
// TestPlistDecoder_DecodeDiskInfo_WithImproperContainerPlistInput verifies
// that a container plist missing its header fails to decode.
func TestPlistDecoder_DecodeDiskInfo_WithImproperContainerPlistInput(t *testing.T) {
	dec := &PlistDecoder{}

	actualDisk, err := dec.DecodeDiskInfo(strings.NewReader(decoderBrokenContainerInfo))

	assert.Error(t, err, "shouldn't be able to decode broken plist data")
	assert.Nil(t, actualDisk, "should get nil since decode failed")
}
// TestPlistDecoder_DecodeDiskInfo_ContainerSuccess decodes a sparse but valid
// container plist fixture and checks the populated container fields.
func TestPlistDecoder_DecodeDiskInfo_ContainerSuccess(t *testing.T) {
	const (
		testDiskID = "disk2"
		testPhysicalStoreID = "disk0s2"
		containerSize uint64 = 6_000_000
		freeSize uint64 = 4_000_000
	)
	d := &PlistDecoder{}
	reader := strings.NewReader(decoderContainerInfo)
	expectedDisk := &types.DiskInfo{
		ContainerInfo: types.ContainerInfo{
			APFSContainerFree: freeSize,
			APFSContainerSize: containerSize,
		},
		AESHardware: false,
		APFSContainerReference: testDiskID,
		APFSPhysicalStores: []types.APFSPhysicalStore{{DeviceIdentifier: testPhysicalStoreID}},
	}
	actualDisk, err := d.DecodeDiskInfo(reader)
	assert.NoError(t, err, "should be able to decode valid container plist data")
	// assert.ObjectsAreEqualValues only returns a bool and never fails the
	// test; EqualValues actually reports a mismatch.
	assert.EqualValues(t, expectedDisk, actualDisk)
}
// TestPlistDecoder_DecodeSystemPartitions_WithoutInput verifies decoding empty
// input succeeds and yields a zero-value SystemPartitions.
func TestPlistDecoder_DecodeSystemPartitions_WithoutInput(t *testing.T) {
	d := &PlistDecoder{}
	reader := strings.NewReader("")
	wantParts := &types.SystemPartitions{}
	gotParts, err := d.DecodeSystemPartitions(reader)
	assert.NoError(t, err, "should be able to decode empty input")
	// assert.ObjectsAreEqualValues only returns a bool and never fails the
	// test; EqualValues actually reports a mismatch.
	assert.EqualValues(t, wantParts, gotParts)
}
// TestPlistDecoder_DecodeSystemPartitions_WithoutPlistInput verifies that
// plain text input fails to decode and yields no partition data.
func TestPlistDecoder_DecodeSystemPartitions_WithoutPlistInput(t *testing.T) {
	dec := &PlistDecoder{}

	gotParts, err := dec.DecodeSystemPartitions(strings.NewReader("this is not a plist"))

	assert.Error(t, err, "shouldn't be able to decode non-plist input")
	assert.Nil(t, gotParts, "should get nil since decode failed")
}
// TestPlistDecoder_DecodeSystemPartitions_WithBrokenDiskInfoPlist verifies
// that a list plist missing its header fails to decode.
func TestPlistDecoder_DecodeSystemPartitions_WithBrokenDiskInfoPlist(t *testing.T) {
	dec := &PlistDecoder{}

	gotParts, err := dec.DecodeSystemPartitions(strings.NewReader(decoderBrokenList))

	assert.Error(t, err, "shouldn't be able to decode broken plist data")
	assert.Nil(t, gotParts, "should get nil since decode failed")
}
// TestPlistDecoder_DecodeSystemPartitions_Success decodes a sparse but valid
// list plist fixture and checks disks, partitions, volumes, and snapshots.
func TestPlistDecoder_DecodeSystemPartitions_Success(t *testing.T) {
	const (
		testDiskID = "disk0"
		testPartID = "disk0s1"
		diskSize uint64 = 1_000_000
		testPhysicalStoreID = "disk0s2"
		testVolumeID = "disk2s4"
		testSnapshotUUID = "AAAAAAAA-BBBB-CCCC-DDDD-FFFFFFFFFFFF"
		testVolumeName = "Macintosh HD - Data"
	)
	d := &PlistDecoder{}
	reader := strings.NewReader(decoderList)
	wantParts := &types.SystemPartitions{
		AllDisks: []string{testDiskID},
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Partitions: []types.Partition{
					{DeviceIdentifier: testPartID},
				},
				Size: diskSize,
			},
			{
				APFSPhysicalStores: []types.APFSPhysicalStoreID{
					{DeviceIdentifier: testPhysicalStoreID},
				},
				APFSVolumes: []types.APFSVolume{
					{
						DeviceIdentifier: testVolumeID,
						MountedSnapshots: []types.Snapshot{
							{SnapshotUUID: testSnapshotUUID},
						},
					},
				},
			},
		},
		VolumesFromDisks: []string{testVolumeName},
		WholeDisks: []string{testDiskID},
	}
	gotParts, err := d.DecodeSystemPartitions(reader)
	assert.NoError(t, err, "should be able to decode valid list plist data")
	// assert.ObjectsAreEqualValues only returns a bool and never fails the
	// test; EqualValues actually reports a mismatch.
	assert.EqualValues(t, wantParts, gotParts)
}
| 210 |
ec2-macos-utils | aws | Go | // Package diskutil provides the functionality necessary for interacting with macOS's diskutil CLI.
package diskutil
//go:generate mockgen -source=diskutil.go -destination=mocks/mock_diskutil.go
import (
"errors"
"fmt"
"strings"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"github.com/aws/ec2-macos-utils/internal/system"
"github.com/Masterminds/semver"
)
const (
	// minimumGrowFreeSpace defines the minimum amount of free space (in bytes) required to attempt running
	// diskutil's resize command. This is roughly 1 MB.
	minimumGrowFreeSpace = 1000000
)
// ErrReadOnly identifies errors due to dry-run not being able to continue without mutating changes.
// Errors returned by Dryrun-wrapped utilities wrap this sentinel, so callers can detect it with errors.Is.
var ErrReadOnly = errors.New("read-only mode")
// FreeSpaceError defines an error to distinguish when there's not enough space to grow the specified container.
type FreeSpaceError struct {
	// freeSpaceBytes records how much free space (in bytes) was actually available.
	freeSpaceBytes uint64
}

// Error reports the available free space in a human-readable message.
func (e FreeSpaceError) Error() string {
	message := fmt.Sprintf("%d bytes available", e.freeSpaceBytes)
	return message
}
// DiskUtil outlines the functionality necessary for wrapping macOS's diskutil tool.
// Per-release implementations are selected via ForProduct.
type DiskUtil interface {
	// APFS outlines the functionality necessary for wrapping diskutil's "apfs" verb.
	APFS

	// Info fetches raw disk information for the specified device identifier.
	Info(id string) (*types.DiskInfo, error)

	// List fetches all disk and partition information for the system.
	// This output will be filtered based on the args provided.
	List(args []string) (*types.SystemPartitions, error)

	// RepairDisk attempts to repair the disk for the specified device identifier.
	// This process requires root access.
	RepairDisk(id string) (string, error)
}
// APFS outlines the functionality necessary for wrapping diskutil's "apfs" verb.
type APFS interface {
	// ResizeContainer attempts to grow the APFS container with the given device identifier
	// to the specified size. If the given size is 0, ResizeContainer will attempt to grow
	// the disk to its maximum size. The size is passed as a string (e.g. "0").
	ResizeContainer(id string, size string) (string, error)
}
// readonlyWrapper provides a typed implementation for DiskUtil that substitutes mutating
// methods with dryrun alternatives.
type readonlyWrapper struct {
	// impl is the DiskUtil implementation that should have mutating methods substituted for dryrun methods.
	impl DiskUtil
}

// ResizeContainer is a mutating operation, so it is skipped and an error wrapping ErrReadOnly is returned.
func (r readonlyWrapper) ResizeContainer(id string, size string) (string, error) {
	return "", fmt.Errorf("skip resize container: %w", ErrReadOnly)
}

// Info is read-only and is delegated to the wrapped implementation.
func (r readonlyWrapper) Info(id string) (*types.DiskInfo, error) {
	return r.impl.Info(id)
}

// List is read-only and is delegated to the wrapped implementation.
func (r readonlyWrapper) List(args []string) (*types.SystemPartitions, error) {
	return r.impl.List(args)
}

// RepairDisk is a mutating operation, so it is skipped and an error wrapping ErrReadOnly is returned.
func (r readonlyWrapper) RepairDisk(id string) (string, error) {
	return "", fmt.Errorf("skip repair disk: %w", ErrReadOnly)
}

// Type assertion to ensure readonlyWrapper implements the DiskUtil interface.
var _ DiskUtil = (*readonlyWrapper)(nil)

// Dryrun takes a DiskUtil implementation and wraps the mutating methods with dryrun alternatives.
func Dryrun(impl DiskUtil) *readonlyWrapper {
	return &readonlyWrapper{impl}
}
// ForProduct creates a new diskutil controller for the given product.
// Each supported macOS release gets its own implementation (the raw diskutil
// output differs between releases — see diskutilMojave); unrecognized
// releases produce an error.
func ForProduct(p *system.Product) (DiskUtil, error) {
	switch p.Release {
	case system.Mojave:
		return newMojave(p.Version)
	case system.Catalina:
		return newCatalina(p.Version)
	case system.BigSur:
		return newBigSur(p.Version)
	case system.Monterey:
		return newMonterey(p.Version)
	case system.Ventura:
		return newVentura(p.Version)
	default:
		return nil, errors.New("unknown release")
	}
}
// newMojave configures the DiskUtil for the specified Mojave version.
// The version is currently unused; one Mojave implementation covers all versions.
func newMojave(version semver.Version) (*diskutilMojave, error) {
	return &diskutilMojave{
		embeddedDiskutil: &DiskUtilityCmd{},
		dec:              &PlistDecoder{},
	}, nil
}
// newCatalina configures the DiskUtil for the specified Catalina version.
// The version is currently unused; one Catalina implementation covers all versions.
func newCatalina(version semver.Version) (*diskutilCatalina, error) {
	return &diskutilCatalina{
		embeddedDiskutil: &DiskUtilityCmd{},
		dec:              &PlistDecoder{},
	}, nil
}
// newBigSur configures the DiskUtil for the specified Big Sur version.
// The version is currently unused; one Big Sur implementation covers all versions.
func newBigSur(version semver.Version) (*diskutilBigSur, error) {
	return &diskutilBigSur{
		embeddedDiskutil: &DiskUtilityCmd{},
		dec:              &PlistDecoder{},
	}, nil
}
// newMonterey configures the DiskUtil for the specified Monterey version.
// The version is currently unused; one Monterey implementation covers all versions.
func newMonterey(version semver.Version) (*diskutilMonterey, error) {
	return &diskutilMonterey{
		embeddedDiskutil: &DiskUtilityCmd{},
		dec:              &PlistDecoder{},
	}, nil
}
// newVentura configures the DiskUtil for the specified Ventura version.
// It constructs the dedicated diskutilVentura implementation; previously this
// returned a *diskutilMonterey, leaving the diskutilVentura type defined in
// this package entirely unused.
func newVentura(version semver.Version) (*diskutilVentura, error) {
	du := &diskutilVentura{
		embeddedDiskutil: &DiskUtilityCmd{},
		dec:              &PlistDecoder{},
	}
	return du, nil
}
// embeddedDiskutil is a private interface used to embed UtilImpl into implementation-specific structs.
// Embedding through this alias spares each per-release struct from hand-wiring every UtilImpl method.
type embeddedDiskutil interface {
	UtilImpl
}
// diskutilMojave wraps all the functionality necessary for interacting with macOS's diskutil on Mojave. The
// major difference is that the raw plist data emitted by macOS's diskutil CLI doesn't include the physical store data.
// This requires a separate fetch to find the specific physical store information for the disk(s).
type diskutilMojave struct {
	// embeddedDiskutil provides the diskutil implementation to prevent manual wiring between UtilImpl and DiskUtil.
	embeddedDiskutil

	// dec is the Decoder used to decode the raw output from UtilImpl into usable structs.
	dec Decoder
}
// List utilizes the UtilImpl.List method to fetch the raw list output from diskutil and returns the decoded
// output in a SystemPartitions struct. List also attempts to update each APFS Volume's physical store via a separate
// fetch method since the version of diskutil on Mojave doesn't provide that information in its List verb.
//
// It is possible for List to fail when updating the physical stores, but it will still return the original data
// that was decoded into the SystemPartitions struct.
func (d *diskutilMojave) List(args []string) (*types.SystemPartitions, error) {
	partitions, err := list(d.embeddedDiskutil, d.dec, args)
	if err != nil {
		return nil, err
	}

	// Backfill physical-store data; on failure, hand back the decoded
	// partitions alongside the error.
	if err := updatePhysicalStores(partitions); err != nil {
		return partitions, err
	}

	return partitions, nil
}
// Info utilizes the UtilImpl.Info method to fetch the raw disk output from diskutil and returns the decoded
// output in a DiskInfo struct. Info also attempts to update each APFS Volume's physical store via a separate
// fetch method since the version of diskutil on Mojave doesn't provide that information in its Info verb.
//
// It is possible for Info to fail when updating the physical stores, but it will still return the original data
// that was decoded into the DiskInfo struct.
func (d *diskutilMojave) Info(id string) (*types.DiskInfo, error) {
	disk, err := info(d.embeddedDiskutil, d.dec, id)
	if err != nil {
		return nil, err
	}

	// Backfill physical-store data; on failure, hand back the decoded disk
	// information alongside the error.
	if err := updatePhysicalStore(disk); err != nil {
		return disk, err
	}

	return disk, nil
}
// diskutilCatalina wraps all the functionality necessary for interacting with macOS's diskutil in GoLang.
// Unlike Mojave, no separate physical-store backfill is needed on this release.
type diskutilCatalina struct {
	// embeddedDiskutil provides the diskutil implementation to prevent manual wiring between UtilImpl and DiskUtil.
	embeddedDiskutil

	// dec is the Decoder used to decode the raw output from UtilImpl into usable structs.
	dec Decoder
}
// List utilizes the UtilImpl.List method to fetch the raw list output from diskutil and returns the decoded
// output in a SystemPartitions struct. It delegates directly to the shared list helper.
func (d *diskutilCatalina) List(args []string) (*types.SystemPartitions, error) {
	return list(d.embeddedDiskutil, d.dec, args)
}

// Info utilizes the UtilImpl.Info method to fetch the raw disk output from diskutil and returns the decoded
// output in a DiskInfo struct. It delegates directly to the shared info helper.
func (d *diskutilCatalina) Info(id string) (*types.DiskInfo, error) {
	return info(d.embeddedDiskutil, d.dec, id)
}
// diskutilBigSur wraps all the functionality necessary for interacting with macOS's diskutil in GoLang.
// Unlike Mojave, no separate physical-store backfill is needed on this release.
type diskutilBigSur struct {
	// embeddedDiskutil provides the diskutil implementation to prevent manual wiring between UtilImpl and DiskUtil.
	embeddedDiskutil

	// dec is the Decoder used to decode the raw output from UtilImpl into usable structs.
	dec Decoder
}
// List utilizes the UtilImpl.List method to fetch the raw list output from diskutil and returns the decoded
// output in a SystemPartitions struct. It delegates directly to the shared list helper.
func (d *diskutilBigSur) List(args []string) (*types.SystemPartitions, error) {
	return list(d.embeddedDiskutil, d.dec, args)
}

// Info utilizes the UtilImpl.Info method to fetch the raw disk output from diskutil and returns the decoded
// output in a DiskInfo struct. It delegates directly to the shared info helper.
func (d *diskutilBigSur) Info(id string) (*types.DiskInfo, error) {
	return info(d.embeddedDiskutil, d.dec, id)
}
// diskutilMonterey wraps all the functionality necessary for interacting with macOS's diskutil in GoLang.
// Unlike Mojave, no separate physical-store backfill is needed on this release.
type diskutilMonterey struct {
	// embeddedDiskutil provides the diskutil implementation to prevent manual wiring between UtilImpl and DiskUtil.
	embeddedDiskutil

	// dec is the Decoder used to decode the raw output from UtilImpl into usable structs.
	dec Decoder
}
// List returns the system partition listing produced by the embedded
// UtilImpl's list subcommand, decoded into a types.SystemPartitions.
func (du *diskutilMonterey) List(args []string) (*types.SystemPartitions, error) {
	return list(du.embeddedDiskutil, du.dec, args)
}
// Info returns the disk details reported by the embedded UtilImpl's info
// subcommand for id, decoded into a types.DiskInfo.
func (du *diskutilMonterey) Info(id string) (*types.DiskInfo, error) {
	return info(du.embeddedDiskutil, du.dec, id)
}
// diskutilVentura wraps all the functionality necessary for interacting with macOS's diskutil in GoLang.
// It targets the diskutil behavior shipped with macOS Ventura.
type diskutilVentura struct {
	// embeddedDiskutil provides the diskutil implementation to prevent manual wiring between UtilImpl and DiskUtil.
	embeddedDiskutil

	// dec is the Decoder used to decode the raw output from UtilImpl into usable structs.
	dec Decoder
}
// List returns the system partition listing produced by the embedded
// UtilImpl's list subcommand, decoded into a types.SystemPartitions.
func (du *diskutilVentura) List(args []string) (*types.SystemPartitions, error) {
	return list(du.embeddedDiskutil, du.dec, args)
}
// Info returns the disk details reported by the embedded UtilImpl's info
// subcommand for id, decoded into a types.DiskInfo.
func (du *diskutilVentura) Info(id string) (*types.DiskInfo, error) {
	return info(du.embeddedDiskutil, du.dec, id)
}
// info fetches the raw diskutil info output for id through util and decodes
// it into a usable types.DiskInfo.
func info(util UtilImpl, decoder Decoder, id string) (*types.DiskInfo, error) {
	raw, err := util.Info(id)
	if err != nil {
		return nil, err
	}

	// Decode the plist text straight from an in-memory reader.
	return decoder.DecodeDiskInfo(strings.NewReader(raw))
}
// list fetches the raw diskutil list output (filtered by args) through util
// and decodes it into a usable types.SystemPartitions.
func list(util UtilImpl, decoder Decoder, args []string) (*types.SystemPartitions, error) {
	raw, err := util.List(args)
	if err != nil {
		return nil, err
	}

	// Decode the plist text straight from an in-memory reader.
	return decoder.DecodeSystemPartitions(strings.NewReader(raw))
}
| 335 |
ec2-macos-utils | aws | Go | package diskutil
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFreeSpaceError_Error verifies that FreeSpaceError reports the number of
// available bytes in its message.
//
// Note: renamed from TestMinimumGrowSpaceError_Error — the old name referred
// to a type that no longer matches the error being exercised here.
func TestFreeSpaceError_Error(t *testing.T) {
	const expectedSize uint64 = 0

	e := FreeSpaceError{
		freeSpaceBytes: expectedSize,
	}

	// Build the expectation from the same constant used to construct the
	// error, rather than repeating the literal.
	expectedErrorMessage := fmt.Sprintf("%d bytes available", expectedSize)
	actualErrorMessage := e.Error()

	assert.Equal(t, expectedErrorMessage, actualErrorMessage, "expected message to include metadata")
}
| 23 |
ec2-macos-utils | aws | Go | package diskutil
import (
"errors"
"fmt"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"github.com/dustin/go-humanize"
"github.com/sirupsen/logrus"
)
// GrowContainer grows a container to its maximum size by performing the following operations:
//  1. Verify that the given types.DiskInfo is an APFS container that can be resized.
//  2. Fetch the types.DiskInfo for the underlying physical disk (if the container isn't a physical device).
//  3. Repair the parent disk to force the kernel to get the latest GPT information for the disk.
//  4. Check if there's enough free space on the disk to perform an APFS.ResizeContainer.
//  5. Resize the container to its maximum size.
//
// If u is a read-only implementation, the mutating steps report ErrReadOnly
// and are logged as dry runs instead of failing.
func GrowContainer(u DiskUtil, container *types.DiskInfo) error {
	if container == nil {
		return fmt.Errorf("unable to resize nil container")
	}

	logrus.WithField("device_id", container.DeviceIdentifier).Info("Checking if device can be APFS resized...")
	if err := canAPFSResize(container); err != nil {
		return fmt.Errorf("unable to resize container: %w", err)
	}
	logrus.Info("Device can be resized")

	// We'll need to mutate the container's underlying physical disk, so resolve that if that's not what we have
	// (which is basically guaranteed to not have physical disk for container resizes, should be the virtual APFS
	// container).
	phy := container
	if !phy.IsPhysical() {
		parent, err := u.Info(phy.ParentWholeDisk)
		if err != nil {
			return fmt.Errorf("unable to determine physical disk: %w", err)
		}
		// using the parent disk of provided disk (probably a container)
		phy = parent
	}

	// Capture any free space on a resized disk. repairDisk forces the kernel
	// to re-read the GPT so the free-space numbers below are current.
	logrus.Info("Repairing the parent disk...")
	_, err := repairParentDisk(u, phy)
	if err != nil {
		return fmt.Errorf("cannot update free space on disk: %w", err)
	}
	logrus.Info("Successfully repaired the parent disk")

	// Minimum free space to resize required - bail if we don't have enough.
	logrus.WithField("device_id", phy.DeviceIdentifier).Info("Fetching amount of free space on device...")
	totalFree, err := getDiskFreeSpace(u, phy)
	if err != nil {
		return fmt.Errorf("cannot determine available space on disk: %w", err)
	}
	logrus.WithField("freed_bytes", humanize.Bytes(totalFree)).Trace("updated free space on disk")
	if totalFree < minimumGrowFreeSpace {
		logrus.WithFields(logrus.Fields{
			"total_free":       humanize.Bytes(totalFree),
			"required_minimum": humanize.Bytes(minimumGrowFreeSpace),
		}).Warn("Available free space does not meet required minimum to grow")
		return fmt.Errorf("not enough space to resize container: %w", FreeSpaceError{totalFree})
	}

	logrus.WithFields(logrus.Fields{
		"device_id":  phy.DeviceIdentifier,
		"free_space": humanize.Bytes(totalFree),
	}).Info("Resizing container to maximum size...")
	// Size "0" asks diskutil to grow the container to the maximum possible size.
	out, err := u.ResizeContainer(phy.DeviceIdentifier, "0")
	logrus.WithField("out", out).Debug("Resize output")
	// ErrReadOnly marks a dry run: log what would have happened, don't fail.
	if errors.Is(err, ErrReadOnly) {
		logrus.WithError(err).Warn("Would have resized container to max size")
	} else if err != nil {
		return err
	}

	return nil
}
// canAPFSResize does some basic checking on a types.DiskInfo to see if it
// matches the criteria necessary for APFS.ResizeContainer to succeed. A disk
// qualifies when its ContainerInfo reports an "apfs" filesystem, or when it
// carries both an APFS container reference and at least one physical store.
func canAPFSResize(disk *types.DiskInfo) error {
	if disk == nil {
		return errors.New("no disk information")
	}

	// Path 1: populated ContainerInfo whose filesystem is APFS.
	viaFilesystem := (disk.ContainerInfo != types.ContainerInfo{}) &&
		disk.ContainerInfo.FilesystemType == "apfs"

	// Path 2: an APFS container reference backed by physical stores.
	viaContainerRef := disk.APFSContainerReference != "" && len(disk.APFSPhysicalStores) > 0

	if viaFilesystem || viaContainerRef {
		return nil
	}

	return errors.New("disk is not apfs")
}
// getDiskFreeSpace calculates the amount of free space a disk has available
// by subtracting the sum of its partition sizes from the total disk size.
// See types.SystemPartitions for more information.
func getDiskFreeSpace(util DiskUtil, disk *types.DiskInfo) (uint64, error) {
	// Gather the system-wide partition listing first.
	sysParts, err := util.List(nil)
	if err != nil {
		return 0, err
	}

	parentID, err := disk.ParentDeviceID()
	if err != nil {
		return 0, err
	}

	return sysParts.AvailableDiskSpace(parentID)
}
// repairParentDisk attempts to find and repair the parent device for the
// given disk so that the reported amount of free space is current. It returns
// the raw repairDisk output alongside any error.
func repairParentDisk(utility DiskUtil, disk *types.DiskInfo) (message string, err error) {
	// Resolve the device identifier of the parent disk.
	parentDiskID, err := disk.ParentDeviceID()
	if err != nil {
		return fmt.Sprintf("failed to get the parent disk ID for container [%s]", disk.DeviceIdentifier), err
	}

	logrus.WithField("parent_id", parentDiskID).Info("Repairing parent disk...")
	output, err := utility.RepairDisk(parentDiskID)
	logrus.WithField("out", output).Debug("RepairDisk output")

	switch {
	case errors.Is(err, ErrReadOnly):
		// Dry-run implementation: log instead of failing.
		logrus.WithError(err).Warn("Would have repaired parent disk")
	case err != nil:
		return output, err
	}

	return output, nil
}
| 141 |
ec2-macos-utils | aws | Go | package diskutil
import (
"fmt"
"io/ioutil"
"testing"
mock_diskutil "github.com/aws/ec2-macos-utils/internal/diskutil/mocks"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"github.com/golang/mock/gomock"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
// init silences logrus for the whole test package so mock-driven tests don't
// write log noise to stdout.
func init() {
	logrus.SetOutput(ioutil.Discard)
}
// TestGrowContainer_WithoutContainer verifies that GrowContainer rejects a
// nil container outright.
func TestGrowContainer_WithoutContainer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// No expectations: the nil check fires before any diskutil call.
	util := mock_diskutil.NewMockDiskUtil(ctrl)

	assert.Error(t, GrowContainer(util, nil), "shouldn't be able to grow container with nil container")
}
// TestGrowContainer_WithEmptyContainer verifies that a zero-value DiskInfo
// fails APFS resize validation.
func TestGrowContainer_WithEmptyContainer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	util := mock_diskutil.NewMockDiskUtil(ctrl)

	empty := types.DiskInfo{}
	assert.Error(t, GrowContainer(util, &empty), "shouldn't be able to grow container with empty container")
}
// TestGrowContainer_WithInfoErr verifies that a failure while resolving the
// parent physical disk via Info aborts the grow operation.
func TestGrowContainer_WithInfoErr(t *testing.T) {
	const testDiskID = "disk1"

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	util := mock_diskutil.NewMockDiskUtil(ctrl)
	util.EXPECT().Info(testDiskID).Return(nil, fmt.Errorf("error"))

	// A virtual container forces GrowContainer to look up its parent disk.
	container := types.DiskInfo{
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		ParentWholeDisk:   testDiskID,
		VirtualOrPhysical: "Virtual",
	}

	assert.Error(t, GrowContainer(util, &container), "shouldn't be able to grow container with info error")
}
// TestGrowContainer_WithRepairDiskErr verifies that a RepairDisk failure on
// the parent disk aborts the grow operation.
func TestGrowContainer_WithRepairDiskErr(t *testing.T) {
	const testDiskID = "disk1"

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	mockUtility.EXPECT().RepairDisk(testDiskID).Return("", fmt.Errorf("error"))

	// Physical APFS disk: validation passes and the repair step is reached
	// without an Info lookup.
	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		ParentWholeDisk:   testDiskID,
		VirtualOrPhysical: "Physical",
	}

	err := GrowContainer(mockUtility, &disk)
	assert.Error(t, err, "shouldn't be able to grow container with repair disk error")
}
// TestGrowContainer_WithListError verifies that a List failure while
// computing free space aborts the grow operation after a successful repair.
func TestGrowContainer_WithListError(t *testing.T) {
	const testDiskID = "disk1"

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	// GrowContainer repairs the parent disk first, then lists partitions;
	// InOrder pins that call sequence.
	gomock.InOrder(
		mockUtility.EXPECT().RepairDisk(testDiskID).Return("", nil),
		mockUtility.EXPECT().List(nil).Return(nil, fmt.Errorf("error")),
	)

	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		ParentWholeDisk:   testDiskID,
		VirtualOrPhysical: "Physical",
	}

	err := GrowContainer(mockUtility, &disk)
	assert.Error(t, err, "shouldn't be able to grow container with list error")
}
// TestGrowContainer_WithoutFreeSpace verifies that GrowContainer refuses to
// resize when the disk's partitions already consume all available space, and
// that the returned error wraps FreeSpaceError.
func TestGrowContainer_WithoutFreeSpace(t *testing.T) {
	const (
		testDiskID = "disk1"
		// total disk size
		diskSize uint64 = 1_000_000
		// individual partition space occupied
		partSize uint64 = 500_000
		// expected amount of free space
		expectedFreeSpace = 0
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Two partitions exactly fill the disk, leaving zero free bytes.
	parts := types.SystemPartitions{
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size:             diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	gomock.InOrder(
		mockUtility.EXPECT().RepairDisk(testDiskID).Return("", nil),
		mockUtility.EXPECT().List(nil).Return(&parts, nil),
	)

	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		ParentWholeDisk:   testDiskID,
		VirtualOrPhysical: "Physical",
	}

	// GrowContainer wraps FreeSpaceError with this exact message; asserted
	// by value below.
	expectedErr := fmt.Errorf("not enough space to resize container: %w", FreeSpaceError{expectedFreeSpace})

	actualErr := GrowContainer(mockUtility, &disk)
	assert.Error(t, actualErr, "shouldn't be able to grow container without free space")
	assert.Equal(t, expectedErr, actualErr, "should get FreeSpaceError since there's no free space")
}
// TestGrowContainer_WithResizeContainerError verifies that a ResizeContainer
// failure is surfaced after the repair and free-space checks succeed.
func TestGrowContainer_WithResizeContainerError(t *testing.T) {
	const (
		testDiskID = "disk1"
		// total disk size
		diskSize uint64 = 3_000_000
		// individual partition space occupied
		partSize uint64 = 500_000
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// 3,000,000 total minus two 500,000 partitions leaves ample free space,
	// so execution proceeds to the resize step.
	parts := types.SystemPartitions{
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size:             diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	gomock.InOrder(
		mockUtility.EXPECT().RepairDisk(testDiskID).Return("", nil),
		mockUtility.EXPECT().List(nil).Return(&parts, nil),
		mockUtility.EXPECT().ResizeContainer(testDiskID, "0").Return("", fmt.Errorf("error")),
	)

	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		DeviceIdentifier:  testDiskID,
		ParentWholeDisk:   testDiskID,
		VirtualOrPhysical: "Physical",
	}

	err := GrowContainer(mockUtility, &disk)
	assert.Error(t, err, "shouldn't be able to grow container with resize container error")
}
// TestGrowContainer_Success verifies the happy path: repair, free-space
// check, and resize all succeed in order.
func TestGrowContainer_Success(t *testing.T) {
	const (
		testDiskID = "disk1"
		// total disk size
		diskSize uint64 = 3_000_000
		// individual partition space occupied
		partSize uint64 = 500_000
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Enough free space remains after the two partitions for a resize.
	parts := types.SystemPartitions{
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size:             diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	gomock.InOrder(
		mockUtility.EXPECT().RepairDisk(testDiskID).Return("", nil),
		mockUtility.EXPECT().List(nil).Return(&parts, nil),
		mockUtility.EXPECT().ResizeContainer(testDiskID, "0").Return("", nil),
	)

	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
		ContainerInfo: types.ContainerInfo{
			FilesystemType: "apfs",
		},
		DeviceIdentifier:  testDiskID,
		ParentWholeDisk:   testDiskID,
		VirtualOrPhysical: "Physical",
	}

	err := GrowContainer(mockUtility, &disk)
	assert.NoError(t, err, "should be able to grow container")
}
// TestCanAPFSResize exercises canAPFSResize against the matrix of DiskInfo
// shapes it accepts and rejects.
func TestCanAPFSResize(t *testing.T) {
	type args struct {
		container *types.DiskInfo
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "WithoutContainer",
			args: args{
				container: nil,
			},
			wantErr: true,
		},
		{
			name: "WithEmptyContainer",
			args: args{
				container: &types.DiskInfo{
					ContainerInfo: types.ContainerInfo{},
				},
			},
			wantErr: true,
		},
		{
			name: "WithoutAPFSFilesystem",
			args: args{
				container: &types.DiskInfo{
					ContainerInfo: types.ContainerInfo{
						FilesystemType: "not apfs",
					},
				},
			},
			wantErr: true,
		},
		{
			name: "WithoutAPFSReference",
			args: args{
				container: &types.DiskInfo{},
			},
			wantErr: true,
		},
		{
			// A container reference alone is insufficient without stores.
			name: "WithoutAPFSPhysicalStores",
			args: args{
				container: &types.DiskInfo{
					APFSContainerReference: "disk1",
				},
			},
			wantErr: true,
		},
		{
			// Accepted via ContainerInfo.FilesystemType == "apfs".
			name: "SuccessAPFSFilesystem",
			args: args{
				container: &types.DiskInfo{
					ContainerInfo: types.ContainerInfo{
						FilesystemType: "apfs",
					},
				},
			},
			wantErr: false,
		},
		{
			// Accepted via container reference plus physical stores.
			name: "SuccessAPFSContainer",
			args: args{
				container: &types.DiskInfo{
					APFSContainerReference: "disk1",
					APFSPhysicalStores: []types.APFSPhysicalStore{
						{DeviceIdentifier: "disk0"},
					},
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := canAPFSResize(tt.args.container)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestGetDiskFreeSpace_WithListErr verifies that a diskutil list failure is
// propagated and the reported free space stays zero.
func TestGetDiskFreeSpace_WithListErr(t *testing.T) {
	const expectedSize uint64 = 0

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	util := mock_diskutil.NewMockDiskUtil(ctrl)
	util.EXPECT().List(nil).Return(nil, fmt.Errorf("error"))

	d := types.DiskInfo{}

	got, err := getDiskFreeSpace(util, &d)
	assert.Error(t, err, "shouldn't be able to get free space with list error")
	assert.Equal(t, expectedSize, got, "shouldn't get size due to list error")
}
// TestGetDiskFreeSpace_WithNilSystemPartitions verifies that a nil partition
// listing (with no error) still results in a failure and zero free space.
func TestGetDiskFreeSpace_WithNilSystemPartitions(t *testing.T) {
	const expectedSize uint64 = 0

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	util := mock_diskutil.NewMockDiskUtil(ctrl)
	util.EXPECT().List(nil).Return(nil, nil)

	d := types.DiskInfo{}

	got, err := getDiskFreeSpace(util, &d)
	assert.Error(t, err, "shouldn't be able to get free space for nil partitions")
	assert.Equal(t, expectedSize, got, "shouldn't get size due to nil partitions")
}
// TestGetDiskFreeSpace_WithoutFreeSpace verifies the free-space calculation
// when the partitions exactly fill the disk.
func TestGetDiskFreeSpace_WithoutFreeSpace(t *testing.T) {
	const (
		testDiskID = "disk1"
		// total disk size
		diskSize uint64 = 1_000_000
		// individual partition space occupied
		partSize uint64 = 500_000
		// should see: diskSize - (2 * partSize)
		expectedFreeSpace uint64 = 0
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)

	parts := types.SystemPartitions{
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size:             diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}

	mockUtility.EXPECT().List(nil).Return(&parts, nil)

	// The disk's parent ID is derived from its APFS physical stores.
	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
	}

	actual, err := getDiskFreeSpace(mockUtility, &disk)
	assert.NoError(t, err, "should be able to calculate free space with valid data")
	assert.Equal(t, expectedFreeSpace, actual, "should have calculated free space based on partitions")
}
// TestGetDiskFreeSpace_FreeSpace verifies the free-space calculation when
// the partitions leave space unused on the disk.
func TestGetDiskFreeSpace_FreeSpace(t *testing.T) {
	const (
		testDiskID = "disk1"
		// total disk size
		diskSize uint64 = 2_000_000
		// individual partition space occupied
		partSize uint64 = 500_000
		// should see: diskSize - (2 * partSize)
		expectedFreeSpace uint64 = 1_000_000
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)

	parts := types.SystemPartitions{
		AllDisksAndPartitions: []types.DiskPart{
			{
				DeviceIdentifier: testDiskID,
				Size:             diskSize,
				Partitions: []types.Partition{
					{Size: partSize},
					{Size: partSize},
				},
			},
		},
	}

	mockUtility.EXPECT().List(nil).Return(&parts, nil)

	// The disk's parent ID is derived from its APFS physical stores.
	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
	}

	actual, err := getDiskFreeSpace(mockUtility, &disk)
	assert.NoError(t, err, "should be able to calculate free space with valid data")
	assert.Equal(t, expectedFreeSpace, actual, "should have calculated free space based on partitions")
}
// TestRepairParentDisk_WithoutDiskInfo verifies that repairParentDisk fails
// when the disk carries no data to derive a parent device ID from.
func TestRepairParentDisk_WithoutDiskInfo(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// RepairDisk must never be called: parent resolution fails first.
	util := mock_diskutil.NewMockDiskUtil(ctrl)

	d := types.DiskInfo{}
	wantMsg := fmt.Sprintf("failed to get the parent disk ID for container [%s]", d.DeviceIdentifier)

	gotMsg, err := repairParentDisk(util, &d)
	assert.Error(t, err, "shouldn't be able to repair disk without disk info")
	assert.Equal(t, wantMsg, gotMsg, "should see error message for device")
}
// TestRepairParentDisk_WithRepairDiskErr verifies that a RepairDisk failure
// is propagated along with the command's raw output.
func TestRepairParentDisk_WithRepairDiskErr(t *testing.T) {
	const testDiskID = "disk0"

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	mockUtility.EXPECT().RepairDisk(testDiskID).Return("error", fmt.Errorf("error"))

	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
	}

	// repairParentDisk returns RepairDisk's raw output as its message.
	expectedMessage := "error"

	actualMessage, err := repairParentDisk(mockUtility, &disk)
	assert.Error(t, err, "shouldn't be able to repair parent disk with repair disk error")
	assert.Equal(t, expectedMessage, actualMessage, "should see error message for device")
}
// TestRepairParentDisk_Success verifies the happy path where RepairDisk
// succeeds for the resolved parent device.
func TestRepairParentDisk_Success(t *testing.T) {
	const (
		testDiskID      = "disk0"
		expectedMessage = ""
	)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockUtility := mock_diskutil.NewMockDiskUtil(ctrl)
	mockUtility.EXPECT().RepairDisk(testDiskID).Return("", nil)

	disk := types.DiskInfo{
		APFSPhysicalStores: []types.APFSPhysicalStore{
			{DeviceIdentifier: testDiskID},
		},
	}

	actualMessage, err := repairParentDisk(mockUtility, &disk)
	assert.NoError(t, err, "should be able to repair parent with valid data")
	assert.Equal(t, expectedMessage, actualMessage, "should see expected message")
}
| 536 |
ec2-macos-utils | aws | Go | package diskutil
import (
"fmt"
"regexp"
"github.com/aws/ec2-macos-utils/internal/diskutil/types"
"github.com/aws/ec2-macos-utils/internal/util"
)
// updatePhysicalStores fills in the APFS physical store for every APFS entry
// in the given SystemPartitions listing, leaving non-APFS entries untouched.
func updatePhysicalStores(partitions *types.SystemPartitions) error {
	for i := range partitions.AllDisksAndPartitions {
		part := partitions.AllDisksAndPartitions[i]

		// Skip anything that isn't an APFS disk/partition.
		if !isAPFSVolume(part) {
			continue
		}

		// Resolve the physical store backing this device.
		storeID, err := fetchPhysicalStore(part.DeviceIdentifier)
		if err != nil {
			return err
		}

		// Record the store on the listing entry itself.
		partitions.AllDisksAndPartitions[i].APFSPhysicalStores = append(
			part.APFSPhysicalStores, types.APFSPhysicalStoreID{storeID})
	}

	return nil
}
// isAPFSVolume checks if a given DiskPart is an APFS container.
// A DiskPart counts as APFS when its APFSVolumes slice is non-nil
// (diskutil only populates that field for APFS entries).
func isAPFSVolume(part types.DiskPart) bool {
	return part.APFSVolumes != nil
}
// fetchPhysicalStore shells out to `diskutil list <id>` and extracts the
// device's APFS physical store from the human-readable output. Only one
// physical store is returned, which may be insufficient for fusion devices
// backed by more than one store.
func fetchPhysicalStore(id string) (string, error) {
	// The 'list' verb with a device ID prints human-readable output that
	// includes the "Physical Store" line we parse below.
	cmd := []string{"diskutil", "list", id}

	out, err := util.ExecuteCommand(cmd, "", nil, nil)
	if err != nil {
		return "", fmt.Errorf("%s: %w", out.Stderr, err)
	}

	return parsePhysicalStoreId(out.Stdout)
}
// Patterns used by parsePhysicalStoreId, compiled once at package scope so
// repeated calls don't recompile them (they were previously rebuilt on every
// invocation). The patterns themselves are unchanged.
var (
	// physicalStoreExp matches the "Physical Store disk..." line emitted by
	// `diskutil list` for APFS devices.
	physicalStoreExp = regexp.MustCompile(`\s*Physical Store disk[0-9]+(s[0-9]+)*`)
	// diskIdExp matches a bare disk ID (without the "/dev/" prefix).
	diskIdExp = regexp.MustCompile(`disk[0-9]+(s[0-9]+)*`)
)

// parsePhysicalStoreId searches a raw string for the string
// "Physical Store disk[0-9]+(s[0-9]+)*" and returns the disk ID portion.
// It returns an error when no physical store line is present.
func parsePhysicalStoreId(raw string) (string, error) {
	physicalStore := physicalStoreExp.FindString(raw)
	diskId := diskIdExp.FindString(physicalStore)
	if diskId == "" {
		return "", fmt.Errorf("physical store not found")
	}

	return diskId, nil
}
// updatePhysicalStore appends the APFS physical store for disk when the disk
// is APFS-backed media; non-APFS disks are returned unchanged.
func updatePhysicalStore(disk *types.DiskInfo) error {
	if !isAPFSMedia(disk) {
		return nil
	}

	storeID, err := fetchPhysicalStore(disk.DeviceIdentifier)
	if err != nil {
		return err
	}

	disk.APFSPhysicalStores = append(disk.APFSPhysicalStores, types.APFSPhysicalStore{storeID})
	return nil
}
// isAPFSMedia checks if the given DiskInfo is an APFS container or volume,
// either via its filesystem type or its IORegistry entry name.
func isAPFSMedia(disk *types.DiskInfo) bool {
	switch {
	case disk.FilesystemType == "apfs":
		return true
	case disk.IORegistryEntryName == "AppleAPFSMedia":
		return true
	default:
		return false
	}
}
| 91 |
ec2-macos-utils | aws | Go | package diskutil
import (
"fmt"
"github.com/aws/ec2-macos-utils/internal/util"
)
// UtilImpl outlines the functionality necessary for wrapping macOS's diskutil tool. The methods are intentionally
// named to correspond to diskutil(8)'s subcommand names as its API. Implementations return diskutil's raw
// (string) output; decoding into structs happens at a higher layer.
type UtilImpl interface {
	// APFSImpl outlines the functionality necessary for wrapping diskutil's APFS verb.
	APFSImpl

	// Info fetches raw disk information for the specified device identifier.
	Info(id string) (string, error)

	// List fetches all disk and partition information for the system.
	// This output will be filtered based on the args provided.
	List(args []string) (string, error)

	// RepairDisk attempts to repair the disk for the specified device identifier.
	// This process requires root access.
	RepairDisk(id string) (string, error)
}
// APFSImpl outlines the functionality necessary for wrapping diskutil's APFS verb.
type APFSImpl interface {
	// ResizeContainer attempts to grow the APFS container with the given device identifier
	// to the specified size. If the given size is 0, ResizeContainer will attempt to grow
	// the disk to its maximum size.
	ResizeContainer(id string, size string) (string, error)
}
// DiskUtilityCmd is an empty struct that provides the implementation for the DiskUtility interface
// by shelling out to the system's diskutil binary.
type DiskUtilityCmd struct{}
// List runs `diskutil list -plist` (plus any caller-supplied args) and
// returns the raw plist output describing the system's disks and partitions.
func (d *DiskUtilityCmd) List(args []string) (string, error) {
	// -plist switches diskutil from human-readable output to plist format;
	// extra args are forwarded to the list verb unchanged.
	cmd := append([]string{"diskutil", "list", "-plist"}, args...)

	out, err := util.ExecuteCommand(cmd, "", nil, nil)
	if err != nil {
		return out.Stdout, fmt.Errorf("diskutil: failed to run diskutil command to list all disks, stderr: [%s]: %w", out.Stderr, err)
	}

	return out.Stdout, nil
}
// Info runs `diskutil info -plist <id>` and returns the raw plist output
// describing the disk, partition, or container with the given identifier.
func (d *DiskUtilityCmd) Info(id string) (string, error) {
	// -plist switches diskutil from human-readable output to plist format.
	cmd := []string{"diskutil", "info", "-plist", id}

	out, err := util.ExecuteCommand(cmd, "", nil, nil)
	if err != nil {
		return out.Stdout, fmt.Errorf("diskutil: failed to run diskutil command to fetch disk information, stderr: [%s]: %w", out.Stderr, err)
	}

	return out.Stdout, nil
}
// RepairDisk runs `diskutil repairDisk <id>` to repair the specified volume
// and refresh its metadata (e.g. the amount of free space). repairDisk asks
// for interactive confirmation, so it is driven via util.ExecuteCommandYes.
func (d *DiskUtilityCmd) RepairDisk(id string) (string, error) {
	cmd := []string{"diskutil", "repairDisk", id}

	// ExecuteCommandYes answers the interactive "yes"/"no" prompt for us.
	out, err := util.ExecuteCommandYes(cmd, "", []string{})
	if err != nil {
		return out.Stdout, fmt.Errorf("diskutil: failed to run repairDisk command, stderr: [%s]: %w", out.Stderr, err)
	}

	return out.Stdout, nil
}
// ResizeContainer runs `diskutil apfs resizeContainer <id> <size>` to change
// the size of the specified APFS container. The size may be human-readable
// (e.g. "110g", "1.5t"); "0" grows the container to its maximum size.
func (d *DiskUtilityCmd) ResizeContainer(id string, size string) (string, error) {
	// * apfs - specifies that a virtual APFS volume is going to be modified
	// * resizeContainer - indicates that a container is going to be resized
	cmdResizeContainer := []string{"diskutil", "apfs", "resizeContainer", id, size}

	// Execute the diskutil apfs resizeContainer command and store the output
	cmdOut, err := util.ExecuteCommand(cmdResizeContainer, "", nil, nil)
	if err != nil {
		// Message format fixed to "stderr: [...]" for consistency with the
		// List, Info, and RepairDisk error messages (was "stderr [...]").
		return cmdOut.Stdout, fmt.Errorf("diskutil: failed to run diskutil command to resize the container, stderr: [%s]: %w", cmdOut.Stderr, err)
	}

	return cmdOut.Stdout, nil
}
| 108 |
ec2-macos-utils | aws | Go | package identifier
import (
"regexp"
"strings"
)
// diskIDExp recognizes device identifiers of the form "diskN".
var diskIDExp = regexp.MustCompile(`disk[0-9]+`)

// ParseDiskID extracts the first supported disk identifier found in s.
// It returns the empty string when s is blank or contains no identifier.
func ParseDiskID(s string) string {
	if strings.TrimSpace(s) == "" {
		return ""
	}

	return diskIDExp.FindString(s)
}
| 18 |
ec2-macos-utils | aws | Go | package identifier
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseDiskID(t *testing.T) {
type args struct {
s string
}
tests := []struct {
name string
args args
want string
}{
{
name: "with empty input",
args: args{
s: "",
},
want: "",
},
{
name: "without device id",
args: args{
s: "this is not a device identifier",
},
want: "",
},
{
name: "with device id",
args: args{
s: "disk1",
},
want: "disk1",
},
{
name: "with full device id",
args: args{
s: "/dev/disk1",
},
want: "disk1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ParseDiskID(tt.args.s)
assert.Equal(t, tt.want, got, "parsed id should match expected")
})
}
}
| 55 |
ec2-macos-utils | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: diskutil.go
// Package mock_diskutil is a generated GoMock package.
package mock_diskutil
import (
reflect "reflect"
types "github.com/aws/ec2-macos-utils/internal/diskutil/types"
gomock "github.com/golang/mock/gomock"
)
// MockDiskUtil is a mock of DiskUtil interface.
type MockDiskUtil struct {
ctrl *gomock.Controller
recorder *MockDiskUtilMockRecorder
}
// MockDiskUtilMockRecorder is the mock recorder for MockDiskUtil.
type MockDiskUtilMockRecorder struct {
mock *MockDiskUtil
}
// NewMockDiskUtil creates a new mock instance.
func NewMockDiskUtil(ctrl *gomock.Controller) *MockDiskUtil {
mock := &MockDiskUtil{ctrl: ctrl}
mock.recorder = &MockDiskUtilMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDiskUtil) EXPECT() *MockDiskUtilMockRecorder {
return m.recorder
}
// Info mocks base method.
func (m *MockDiskUtil) Info(id string) (*types.DiskInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Info", id)
ret0, _ := ret[0].(*types.DiskInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Info indicates an expected call of Info.
func (mr *MockDiskUtilMockRecorder) Info(id interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockDiskUtil)(nil).Info), id)
}
// List mocks base method.
func (m *MockDiskUtil) List(args []string) (*types.SystemPartitions, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "List", args)
ret0, _ := ret[0].(*types.SystemPartitions)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// List indicates an expected call of List.
func (mr *MockDiskUtilMockRecorder) List(args interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockDiskUtil)(nil).List), args)
}
// RepairDisk mocks base method.
func (m *MockDiskUtil) RepairDisk(id string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RepairDisk", id)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RepairDisk indicates an expected call of RepairDisk.
func (mr *MockDiskUtilMockRecorder) RepairDisk(id interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RepairDisk", reflect.TypeOf((*MockDiskUtil)(nil).RepairDisk), id)
}
// ResizeContainer mocks base method.
func (m *MockDiskUtil) ResizeContainer(id, size string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResizeContainer", id, size)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ResizeContainer indicates an expected call of ResizeContainer.
func (mr *MockDiskUtilMockRecorder) ResizeContainer(id, size interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation against the mock's ResizeContainer method.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResizeContainer", reflect.TypeOf((*MockDiskUtil)(nil).ResizeContainer), id, size)
}
// MockAPFS is a mock of APFS interface.
type MockAPFS struct {
	// ctrl coordinates expected calls and verification.
	ctrl *gomock.Controller
	// recorder registers the caller's expectations.
	recorder *MockAPFSMockRecorder
}
// MockAPFSMockRecorder is the mock recorder for MockAPFS. It is the value
// returned by MockAPFS.EXPECT().
type MockAPFSMockRecorder struct {
	mock *MockAPFS
}
// NewMockAPFS creates a new mock instance bound to the given controller,
// which records calls and verifies registered expectations.
func NewMockAPFS(ctrl *gomock.Controller) *MockAPFS {
	mock := &MockAPFS{ctrl: ctrl}
	mock.recorder = &MockAPFSMockRecorder{mock}
	return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
// Expectations registered through the recorder are checked by the controller.
func (m *MockAPFS) EXPECT() *MockAPFSMockRecorder {
	return m.recorder
}
// ResizeContainer mocks base method.
func (m *MockAPFS) ResizeContainer(id, size string) (string, error) {
	m.ctrl.T.Helper()
	// Dispatch through the controller so registered expectations are matched.
	ret := m.ctrl.Call(m, "ResizeContainer", id, size)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// ResizeContainer indicates an expected call of ResizeContainer.
func (mr *MockAPFSMockRecorder) ResizeContainer(id, size interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation against the mock's ResizeContainer method.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResizeContainer", reflect.TypeOf((*MockAPFS)(nil).ResizeContainer), id, size)
}
| 134 |
ec2-macos-utils | aws | Go | package types
import (
"fmt"
"strings"
"github.com/aws/ec2-macos-utils/internal/diskutil/identifier"
)
// DiskInfo mirrors the output format of the command "diskutil info -plist <disk>" to store information about a disk.
// Field names match diskutil's plist keys via the struct tags; unset keys decode to zero values.
type DiskInfo struct {
	// ContainerInfo is embedded so APFS Container/Volume keys decode alongside the disk keys.
	ContainerInfo
	AESHardware                                 bool                `plist:"AESHardware"`
	APFSContainerReference                      string              `plist:"APFSContainerReference"`
	// APFSPhysicalStores backs ParentDeviceID; virtual disks normally have exactly one entry.
	APFSPhysicalStores                          []APFSPhysicalStore `plist:"APFSPhysicalStores"`
	Bootable                                    bool                `plist:"Bootable"`
	BusProtocol                                 string              `plist:"BusProtocol"`
	CanBeMadeBootable                           bool                `plist:"CanBeMadeBootable"`
	CanBeMadeBootableRequiresDestroy            bool                `plist:"CanBeMadeBootableRequiresDestroy"`
	Content                                     string              `plist:"Content"`
	DeviceBlockSize                             int                 `plist:"DeviceBlockSize"`
	DeviceIdentifier                            string              `plist:"DeviceIdentifier"`
	DeviceNode                                  string              `plist:"DeviceNode"`
	DeviceTreePath                              string              `plist:"DeviceTreePath"`
	Ejectable                                   bool                `plist:"Ejectable"`
	EjectableMediaAutomaticUnderSoftwareControl bool                `plist:"EjectableMediaAutomaticUnderSoftwareControl"`
	EjectableOnly                               bool                `plist:"EjectableOnly"`
	FreeSpace                                   uint64              `plist:"FreeSpace"`
	GlobalPermissionsEnabled                    bool                `plist:"GlobalPermissionsEnabled"`
	IOKitSize                                   uint64              `plist:"IOKitSize"`
	IORegistryEntryName                         string              `plist:"IORegistryEntryName"`
	Internal                                    bool                `plist:"Internal"`
	LowLevelFormatSupported                     bool                `plist:"LowLevelFormatSupported"`
	MediaName                                   string              `plist:"MediaName"`
	MediaType                                   string              `plist:"MediaType"`
	MountPoint                                  string              `plist:"MountPoint"`
	OS9DriversInstalled                         bool                `plist:"OS9DriversInstalled"`
	OSInternalMedia                             bool                `plist:"OSInternalMedia"`
	ParentWholeDisk                             string              `plist:"ParentWholeDisk"`
	PartitionMapPartition                       bool                `plist:"PartitionMapPartition"`
	RAIDMaster                                  bool                `plist:"RAIDMaster"`
	RAIDSlice                                   bool                `plist:"RAIDSlice"`
	Removable                                   bool                `plist:"Removable"`
	RemovableMedia                              bool                `plist:"RemovableMedia"`
	RemovableMediaOrExternalDevice              bool                `plist:"RemovableMediaOrExternalDevice"`
	// SMART data is only present for SMART-enabled devices, hence the pointer.
	SMARTDeviceSpecificKeysMayVaryNotGuaranteed *SmartDeviceInfo    `plist:"SMARTDeviceSpecificKeysMayVaryNotGuaranteed"`
	SMARTStatus                                 string              `plist:"SMARTStatus"`
	Size                                        uint64              `plist:"Size"`
	SolidState                                  bool                `plist:"SolidState"`
	SupportsGlobalPermissionsDisable            bool                `plist:"SupportsGlobalPermissionsDisable"`
	SystemImage                                 bool                `plist:"SystemImage"`
	TotalSize                                   uint64              `plist:"TotalSize"`
	// VirtualOrPhysical drives IsPhysical; expected values include "Physical".
	VirtualOrPhysical                           string              `plist:"VirtualOrPhysical"`
	VolumeName                                  string              `plist:"VolumeName"`
	VolumeSize                                  uint64              `plist:"VolumeSize"`
	WholeDisk                                   bool                `plist:"WholeDisk"`
	Writable                                    bool                `plist:"Writable"`
	WritableMedia                               bool                `plist:"WritableMedia"`
	WritableVolume                              bool                `plist:"WritableVolume"`
}
// IsPhysical reports whether diskutil classified this disk as a physical
// (rather than virtual/synthesized) device.
func (d *DiskInfo) IsPhysical() bool {
	const physicalKind = "Physical"
	return strings.EqualFold(d.VirtualOrPhysical, physicalKind)
}
// ParentDeviceID gets the parent device identifier for a physical store.
//
// APFS Containers and Volumes are virtualized; each is expected to be backed
// by exactly one physical store whose "diskN" prefix names the parent device.
// More than one physical store can indicate a fusion drive —
// https://support.apple.com/en-us/HT202574.
func (d *DiskInfo) ParentDeviceID() (string, error) {
	stores := d.APFSPhysicalStores
	if stores == nil {
		return "", fmt.Errorf("no physical stores found in disk")
	}
	// Anything other than a single store is unexpected for the common case.
	if len(stores) != 1 {
		return "", fmt.Errorf("expected 1 physical store but got [%d]", len(stores))
	}
	deviceID := stores[0].DeviceIdentifier
	if id := identifier.ParseDiskID(deviceID); id != "" {
		return id, nil
	}
	return "", fmt.Errorf("physical store [%s] does not contain the expected expression \"disk[0-9]+\"",
		deviceID)
}
// ContainerInfo expands on DiskInfo to add extra information for APFS Containers.
// It is embedded in DiskInfo so these plist keys decode from the same document.
type ContainerInfo struct {
	APFSContainerFree               uint64 `plist:"APFSContainerFree"`
	APFSContainerSize               uint64 `plist:"APFSContainerSize"`
	APFSSnapshot                    bool   `plist:"APFSSnapshot"`
	APFSSnapshotName                string `plist:"APFSSnapshotName"`
	APFSSnapshotUUID                string `plist:"APFSSnapshotUUID"`
	APFSVolumeGroupID               string `plist:"APFSVolumeGroupID"`
	BooterDeviceIdentifier          string `plist:"BooterDeviceIdentifier"`
	DiskUUID                        string `plist:"DiskUUID"`
	Encryption                      bool   `plist:"Encryption"`
	EncryptionThisVolumeProper      bool   `plist:"EncryptionThisVolumeProper"`
	FileVault                       bool   `plist:"FileVault"`
	FilesystemName                  string `plist:"FilesystemName"`
	FilesystemType                  string `plist:"FilesystemType"`
	FilesystemUserVisibleName       string `plist:"FilesystemUserVisibleName"`
	Fusion                          bool   `plist:"Fusion"`
	Locked                          bool   `plist:"Locked"`
	MacOSSystemAPFSEFIDriverVersion uint64 `plist:"MacOSSystemAPFSEFIDriverVersion"`
	RecoveryDeviceIdentifier        string `plist:"RecoveryDeviceIdentifier"`
	// Sealed is a string (not bool) in diskutil's output.
	Sealed                          string `plist:"Sealed"`
	VolumeAllocationBlockSize       int    `plist:"VolumeAllocationBlockSize"`
	VolumeUUID                      string `plist:"VolumeUUID"`
}
// APFSPhysicalStore represents the physical device usually relating to synthesized virtual devices.
// Note the plist key is "APFSPhysicalStore" even though the field holds a device identifier.
type APFSPhysicalStore struct {
	DeviceIdentifier string `plist:"APFSPhysicalStore"`
}
// SmartDeviceInfo stores SMART information for devices that are SMART-enabled (e.g. device health or problems).
// The paired _0/_1 fields mirror diskutil's split 128-bit NVMe SMART counters.
type SmartDeviceInfo struct {
	AvailableSpare          int `plist:"AVAILABLE_SPARE"`
	AvailableSpareThreshold int `plist:"AVAILABLE_SPARE_THRESHOLD"`
	ControllerBusyTime0     int `plist:"CONTROLLER_BUSY_TIME_0"`
	ControllerBusyTime1     int `plist:"CONTROLLER_BUSY_TIME_1"`
	DataUnitsRead0          int `plist:"DATA_UNITS_READ_0"`
	DataUnitsRead1          int `plist:"DATA_UNITS_READ_1"`
	DataUnitsWritten0       int `plist:"DATA_UNITS_WRITTEN_0"`
	DataUnitsWritten1       int `plist:"DATA_UNITS_WRITTEN_1"`
	HostReadCommands0       int `plist:"HOST_READ_COMMANDS_0"`
	HostReadCommands1       int `plist:"HOST_READ_COMMANDS_1"`
	HostWriteCommands0      int `plist:"HOST_WRITE_COMMANDS_0"`
	HostWriteCommands1      int `plist:"HOST_WRITE_COMMANDS_1"`
	MediaErrors0            int `plist:"MEDIA_ERRORS_0"`
	MediaErrors1            int `plist:"MEDIA_ERRORS_1"`
	NumErrorInfoLogEntries0 int `plist:"NUM_ERROR_INFO_LOG_ENTRIES_0"`
	NumErrorInfoLogEntries1 int `plist:"NUM_ERROR_INFO_LOG_ENTRIES_1"`
	PercentageUsed          int `plist:"PERCENTAGE_USED"`
	PowerCycles0            int `plist:"POWER_CYCLES_0"`
	PowerCycles1            int `plist:"POWER_CYCLES_1"`
	PowerOnHours0           int `plist:"POWER_ON_HOURS_0"`
	PowerOnHours1           int `plist:"POWER_ON_HOURS_1"`
	Temperature             int `plist:"TEMPERATURE"`
	UnsafeShutdowns0        int `plist:"UNSAFE_SHUTDOWNS_0"`
	UnsafeShutdowns1        int `plist:"UNSAFE_SHUTDOWNS_1"`
}
| 148 |
ec2-macos-utils | aws | Go | package types
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestDiskInfo_parentDeviceID exercises DiskInfo.ParentDeviceID across its
// error paths (no stores, multiple stores, malformed store identifier) and
// the single-store success path.
func TestDiskInfo_parentDeviceID(t *testing.T) {
	type args struct {
		disk *DiskInfo
	}
	tests := []struct {
		name    string
		args    args
		wantId  string
		wantErr bool
	}{
		{
			// nil stores must fail before any parsing is attempted
			name: "Bad case: no APFS physical stores",
			args: args{
				disk: &DiskInfo{
					APFSPhysicalStores: nil,
				},
			},
			wantId:  "",
			wantErr: true,
		},
		{
			// two stores (e.g. fusion-drive layout) is rejected
			name: "Bad case: more than 1 APFS physical store",
			args: args{
				disk: &DiskInfo{
					APFSPhysicalStores: []APFSPhysicalStore{
						{DeviceIdentifier: "disk0s2"},
						{DeviceIdentifier: "disk1s2"},
					},
				},
			},
			wantId:  "",
			wantErr: true,
		},
		{
			// "device0s2" doesn't match the disk[0-9]+ pattern
			name: "Bad case: APFS physical store doesn't have expected device identifier format",
			args: args{
				disk: &DiskInfo{
					APFSPhysicalStores: []APFSPhysicalStore{
						{DeviceIdentifier: "device0s2"},
					},
					DeviceIdentifier: "disk2",
				},
			},
			wantId:  "",
			wantErr: true,
		},
		{
			// "disk0s2" parses to parent "disk0"
			name: "Good case: one APFS physical store with expected device identifier format",
			args: args{
				disk: &DiskInfo{
					APFSPhysicalStores: []APFSPhysicalStore{
						{DeviceIdentifier: "disk0s2"},
					},
					DeviceIdentifier: "disk2",
				},
			},
			wantId:  "disk0",
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotId, err := tt.args.disk.ParentDeviceID()
			assert.Equal(t, tt.wantId, gotId, "should have matching parent device ID")
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
| 83 |
ec2-macos-utils | aws | Go | package types
import (
"fmt"
"strings"
)
// SystemPartitions mirrors the output format of the command "diskutil list -plist" to store all disk
// and partition information.
type SystemPartitions struct {
	AllDisks              []string   `plist:"AllDisks"`
	AllDisksAndPartitions []DiskPart `plist:"AllDisksAndPartitions"`
	VolumesFromDisks      []string   `plist:"VolumesFromDisks"`
	WholeDisks            []string   `plist:"WholeDisks"`
}

// AvailableDiskSpace calculates the amount of unallocated disk space for a specific device id.
// It returns an error when the id is not present in AllDisksAndPartitions, or when the reported
// partition sizes exceed the whole-disk size (which would otherwise underflow the unsigned math).
func (p *SystemPartitions) AvailableDiskSpace(id string) (uint64, error) {
	// Find the DiskPart entry whose identifier matches id (case-insensitively).
	var target *DiskPart
	for i := range p.AllDisksAndPartitions {
		if strings.EqualFold(p.AllDisksAndPartitions[i].DeviceIdentifier, id) {
			target = &p.AllDisksAndPartitions[i]
			break
		}
	}
	if target == nil {
		return 0, fmt.Errorf("no partition information found for ID [%s]", id)
	}
	// Sum up the disk's current allocations.
	var allocated uint64
	for _, part := range target.Partitions {
		allocated += part.Size
	}
	// Guard against uint64 underflow: if diskutil ever reports partitions whose combined
	// size exceeds the whole-disk size, fail loudly instead of returning a wrapped-around
	// (absurdly large) free-space value.
	if allocated > target.Size {
		return 0, fmt.Errorf("allocated space [%d] exceeds disk size [%d] for ID [%s]", allocated, target.Size, id)
	}
	return target.Size - allocated, nil
}

// APFSPhysicalStoreID represents the physical device usually relating to synthesized virtual devices.
type APFSPhysicalStoreID struct {
	DeviceIdentifier string `plist:"DeviceIdentifier"`
}

// DiskPart represents a subset of information from DiskInfo.
type DiskPart struct {
	APFSPhysicalStores []APFSPhysicalStoreID `plist:"APFSPhysicalStores"`
	APFSVolumes        []APFSVolume          `plist:"APFSVolumes"`
	Content            string                `plist:"Content"`
	DeviceIdentifier   string                `plist:"DeviceIdentifier"`
	OSInternal         bool                  `plist:"OSInternal"`
	Partitions         []Partition           `plist:"Partitions"`
	Size               uint64                `plist:"Size"`
}

// Partition stores relevant information about a partition in macOS.
type Partition struct {
	Content          string `plist:"Content"`
	DeviceIdentifier string `plist:"DeviceIdentifier"`
	DiskUUID         string `plist:"DiskUUID"`
	Size             uint64 `plist:"Size"`
	VolumeName       string `plist:"VolumeName"`
	VolumeUUID       string `plist:"VolumeUUID"`
}

// APFSVolume represents a macOS APFS Volume with relevant information.
type APFSVolume struct {
	DeviceIdentifier string     `plist:"DeviceIdentifier"`
	DiskUUID         string     `plist:"DiskUUID"`
	MountPoint       string     `plist:"MountPoint"`
	MountedSnapshots []Snapshot `plist:"MountedSnapshots"`
	OSInternal       bool       `plist:"OSInternal"`
	Size             uint64     `plist:"Size"`
	VolumeName       string     `plist:"VolumeName"`
	VolumeUUID       string     `plist:"VolumeUUID"`
}

// Snapshot stores relevant information about a snapshot in macOS.
type Snapshot struct {
	Sealed             string `plist:"Sealed"`
	SnapshotBSD        string `plist:"SnapshotBSD"`
	SnapshotMountPoint string `plist:"SnapshotMountPoint"`
	SnapshotName       string `plist:"SnapshotName"`
	SnapshotUUID       string `plist:"SnapshotUUID"`
}
| 88 |
ec2-macos-utils | aws | Go | package types
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSystemPartitions_AvailableDiskSpace_WithoutTargetDisk verifies that an
// unknown device identifier yields an error and a zero free-space value.
func TestSystemPartitions_AvailableDiskSpace_WithoutTargetDisk(t *testing.T) {
	const missingDiskID = "disk3"
	// Zero is expected because missingDiskID is absent from AllDisksAndPartitions.
	const wantSize uint64 = 0
	partitions := &SystemPartitions{
		AllDisksAndPartitions: []DiskPart{
			{DeviceIdentifier: "disk0"},
			{DeviceIdentifier: "disk1"},
			{DeviceIdentifier: "disk2"},
		},
	}
	got, err := partitions.AvailableDiskSpace(missingDiskID)
	assert.Error(t, err, "shouldn't be able to find disk in partitions")
	assert.Equal(t, wantSize, got, "shouldn't return anything since the disk doesn't exist")
}
// TestSystemPartitions_AvailableDiskSpace_GoodDisk verifies the free-space
// calculation for a disk holding two allocated partitions.
func TestSystemPartitions_AvailableDiskSpace_GoodDisk(t *testing.T) {
	const targetID = "disk1"
	const (
		// whole-disk capacity
		total uint64 = 2_000_000
		// space occupied by each of the two partitions
		perPartition uint64 = 250_000
		// expected result: total - 2*perPartition
		want uint64 = 1_500_000
	)
	partitions := &SystemPartitions{
		AllDisksAndPartitions: []DiskPart{
			// This entry is not the target and must be skipped.
			{DeviceIdentifier: "disk0"},
			{
				DeviceIdentifier: targetID,
				Size:             total,
				Partitions: []Partition{
					{Size: perPartition},
					{Size: perPartition},
				},
			},
		},
	}
	got, err := partitions.AvailableDiskSpace(targetID)
	assert.NoError(t, err, "should be able to calculate free space with valid data")
	assert.Equal(t, want, got, "should have calculated free space based on partitions")
}
| 61 |
ec2-macos-utils | aws | Go | package system
import (
"fmt"
"github.com/Masterminds/semver"
)
// Release is used to define macOS releases in an enumerated constant (e.g. Mojave, Catalina, BigSur)
type Release uint8

// Enumerated macOS releases, ordered oldest to newest. CompatMode is a
// pseudo-release reported when the version matches the 10.16 compat shim
// (SYSTEM_VERSION_COMPAT=1).
const (
	Unknown Release = iota
	Mojave
	Catalina
	BigSur
	Monterey
	Ventura
	CompatMode
)

// String returns the human-readable marketing name for the release, or
// "unknown" for values outside the enumeration.
func (r Release) String() string {
	switch r {
	case Mojave:
		return "Mojave"
	case Catalina:
		return "Catalina"
	case BigSur:
		return "Big Sur"
	case Monterey:
		return "Monterey"
	case Ventura:
		return "Ventura"
	case CompatMode:
		// Typo fix: previously rendered as "Compatability Mode".
		return "Compatibility Mode"
	default:
		return "unknown"
	}
}
// Version-range constraints used by getVersionRelease to map a parsed product
// version onto a Release. Each constraint is built at package init and panics
// on a malformed pattern (see mustInitConstraint).
var (
	// mojaveConstraints are the constraints used to identify Mojave versions (10.14.x).
	mojaveConstraints = mustInitConstraint(semver.NewConstraint("~10.14"))
	// catalinaConstraints are the constraints used to identify Catalina versions (10.15.x).
	catalinaConstraints = mustInitConstraint(semver.NewConstraint("~10.15"))
	// bigSurConstraints are the constraints used to identify BigSur versions (11.x.x).
	bigSurConstraints = mustInitConstraint(semver.NewConstraint("~11"))
	// montereyConstraints are the constraints used to identify Monterey versions (12.x.x).
	montereyConstraints = mustInitConstraint(semver.NewConstraint("~12"))
	// venturaConstraints are the constraints used to identify Ventura versions (13.x.x).
	venturaConstraints = mustInitConstraint(semver.NewConstraint("~13"))
	// compatModeConstraints are the constraints used to identify macOS Big Sur and later. This version is returned
	// when the system is in compat mode (SYSTEM_VERSION_COMPAT=1).
	compatModeConstraints = mustInitConstraint(semver.NewConstraint("~10.16"))
)
// mustInitConstraint passes the constraint through when construction
// succeeded, and panics otherwise. It exists so known-good constraint
// strings can be initialized at package level.
func mustInitConstraint(c *semver.Constraints, err error) *semver.Constraints {
	if err == nil {
		return c
	}
	panic(fmt.Errorf("must initialize semver constraint: %w", err))
}
// Product identifies a macOS release and product version (e.g. Big Sur 11.x).
type Product struct {
	// Release is embedded so Product exposes the release's String/name directly.
	Release
	// Version is the full parsed product version (e.g. 11.6.2).
	Version semver.Version
}
// String renders the product as "macOS <release> <version>".
func (p Product) String() string {
	name := p.Release.String()
	version := p.Version.String()
	return "macOS " + name + " " + version
}
// newProduct initializes a new Product from a version string. The string is
// parsed into a semver.Version and matched against the known release
// constraints to identify the Release. Parsing failures are returned as-is.
func newProduct(version string) (*Product, error) {
	parsed, err := semver.NewVersion(version)
	if err != nil {
		return nil, err
	}
	return &Product{
		Release: getVersionRelease(*parsed),
		Version: *parsed,
	}, nil
}
// getVersionRelease matches the version against each known release constraint
// in order and returns the corresponding Release, or Unknown when none match.
func getVersionRelease(version semver.Version) Release {
	checks := []struct {
		constraint *semver.Constraints
		release    Release
	}{
		{mojaveConstraints, Mojave},
		{catalinaConstraints, Catalina},
		{bigSurConstraints, BigSur},
		{montereyConstraints, Monterey},
		{venturaConstraints, Ventura},
		{compatModeConstraints, CompatMode},
	}
	for _, c := range checks {
		if c.constraint.Check(&version) {
			return c.release
		}
	}
	return Unknown
}
| 112 |
ec2-macos-utils | aws | Go | // Package system provides the functionality necessary for interacting with the macOS system.
package system
import (
"fmt"
"io"
"os"
"howett.net/plist"
)
// File locations and the sentinel version used when reading the macOS
// system version (see readVersion).
const (
	// versionPath is the path on the root filesystem to the SystemVersion plist
	versionPath = "/System/Library/CoreServices/SystemVersion.plist"
	// dotVersionPath is the path to the symlink that directly references versionPath and bypasses the compatibility
	// mode that was introduced with macOS 11.0.
	dotVersionPath = "/System/Library/CoreServices/.SystemVersionPlatform.plist"
	// dotVersionSwitch is the product version number returned by macOS when the system is in compat mode
	// (SYSTEM_VERSION_COMPAT=1). If this version is returned, dotVersionPath should be read to bypass compat mode.
	dotVersionSwitch = "10.16"
)
// System correlates VersionInfo with a Product.
type System struct {
	// versionInfo is the raw decoded SystemVersion plist data.
	versionInfo *VersionInfo
	// product is the release/version pair derived from versionInfo.
	product *Product
}

// Product returns the product identified for this system by Scan.
func (sys *System) Product() *Product {
	return sys.product
}
// Scan reads the system's version information from disk and pairs it with the
// matching Product to build a System.
func Scan() (*System, error) {
	info, err := readVersion()
	if err != nil {
		return nil, err
	}
	prod, err := info.Product()
	if err != nil {
		return nil, err
	}
	return &System{
		versionInfo: info,
		product:     prod,
	}, nil
}
// VersionInfo mirrors the raw data found in the SystemVersion plist file.
type VersionInfo struct {
	ProductBuildVersion       string `plist:"ProductBuildVersion"`
	ProductCopyright          string `plist:"ProductCopyright"`
	ProductName               string `plist:"ProductName"`
	ProductUserVisibleVersion string `plist:"ProductUserVisibleVersion"`
	// ProductVersion is the value parsed by Product()/newProduct.
	ProductVersion string `plist:"ProductVersion"`
	// Note the lowercase leading "i" in the plist key.
	IOSSupportVersion string `plist:"iOSSupportVersion"`
}
// Product determines the specific product that the VersionInfo.ProductVersion is associated with.
// It returns an error when ProductVersion cannot be parsed as a semantic version.
func (v *VersionInfo) Product() (*Product, error) {
	return newProduct(v.ProductVersion)
}
// decodeVersionInfo decodes plist data from the reader into a new VersionInfo.
func decodeVersionInfo(reader io.ReadSeeker) (*VersionInfo, error) {
	info := new(VersionInfo)
	if err := plist.NewDecoder(reader).Decode(info); err != nil {
		return nil, fmt.Errorf("system failed to decode contents of reader: %w", err)
	}
	return info, nil
}
// readVersion reads the SystemVersion plist from versionPath. When the result
// is the compat-mode sentinel version ("10.16"), it re-reads from
// dotVersionPath to bypass macOS's SYSTEM_VERSION_COMPAT behavior.
func readVersion() (*VersionInfo, error) {
	info, err := readProductVersionFile(versionPath)
	if err != nil {
		return nil, err
	}
	if info.ProductVersion != dotVersionSwitch {
		return info, nil
	}
	// Compat mode detected: the dot file exposes the real platform version.
	return readProductVersionFile(dotVersionPath)
}
// readProductVersionFile opens the plist file at the given path and attempts
// to decode it as VersionInfo. (The comment previously referred to a
// non-existent "readProductVersion".)
func readProductVersionFile(path string) (*VersionInfo, error) {
	// Open the SystemVersion.plist file
	versionFile, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer versionFile.Close()
	// Get the VersionInfo from the reader
	version, err := decodeVersionInfo(versionFile)
	if err != nil {
		return nil, err
	}
	return version, nil
}
| 119 |
ec2-macos-utils | aws | Go | package util
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"syscall"
)
// CommandOutput wraps the output from an exec command as strings.
type CommandOutput struct {
	// Stdout is everything the command wrote to standard output.
	Stdout string
	// Stderr is everything the command wrote to standard error.
	Stderr string
}
// ExecuteCommand executes the command and returns Stdout and Stderr as strings.
//
// c is the full argv (c[0] is the binary). runAsUser, when non-empty, names the
// user to run as (otherwise the command runs as the current user). envVars are
// appended to the parent environment. stdin, when non-nil, becomes the command's
// standard input. Output captured before a failure is still returned with the error.
func ExecuteCommand(c []string, runAsUser string, envVars []string, stdin io.ReadCloser) (output CommandOutput, err error) {
	// Check the empty struct case ([]string{}) for the command
	if len(c) == 0 {
		return CommandOutput{}, fmt.Errorf("must provide a command")
	}
	// c[0] is the binary; anything after it is passed through as arguments.
	name := c[0]
	var args []string
	if len(c) > 1 {
		args = c[1:]
	}
	// Set command and create output buffers
	cmd := exec.Command(name, args...)
	var stdoutb, stderrb bytes.Buffer
	cmd.Stdout = &stdoutb
	cmd.Stderr = &stderrb
	// Set command stdin if the stdin parameter is provided
	if stdin != nil {
		cmd.Stdin = stdin
	}
	// collect snapshots whatever output has been captured so far, so every
	// return path reports it consistently (previously duplicated four times).
	collect := func() CommandOutput {
		return CommandOutput{Stdout: stdoutb.String(), Stderr: stderrb.String()}
	}
	// Set runAsUser, if defined, otherwise will run as root
	if runAsUser != "" {
		uid, gid, lookupErr := getUIDandGID(runAsUser)
		if lookupErr != nil {
			// Wrap with %w (was %s plus a stray trailing newline) so callers can unwrap.
			return collect(), fmt.Errorf("error looking up user: %w", lookupErr)
		}
		cmd.SysProcAttr = &syscall.SysProcAttr{
			Credential: &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)},
		}
	}
	// Run with the parent environment plus any caller-supplied variables.
	cmd.Env = append(os.Environ(), envVars...)
	// Start the command's execution
	if err = cmd.Start(); err != nil {
		return collect(), fmt.Errorf("error starting specified command: %w", err)
	}
	// Wait for the command to exit
	if err = cmd.Wait(); err != nil {
		return collect(), fmt.Errorf("error waiting for specified command to exit: %w", err)
	}
	return collect(), nil
}
// ExecuteCommandYes wraps ExecuteCommand with the yes binary in order to bypass user input states in automation.
func ExecuteCommandYes(c []string, runAsUser string, envVars []string) (output CommandOutput, err error) {
	// Set exec commands, one for yes and another for the specified command
	cmdYes := exec.Command("/usr/bin/yes")
	// Pipe cmdYes into cmd
	stdin, err := cmdYes.StdoutPipe()
	if err != nil {
		return CommandOutput{}, fmt.Errorf("error creating pipe between commands")
	}
	// Start the command to run /usr/bin/yes
	if err = cmdYes.Start(); err != nil {
		return CommandOutput{}, fmt.Errorf("error starting /usr/bin/yes command: %w", err)
	}
	// NOTE(review): cmdYes is never Wait()ed on or killed in this function, so the
	// yes process is not reaped here — confirm whether a cmdYes.Wait()/Process.Kill
	// after ExecuteCommand returns is needed to avoid leaving it running.
	return ExecuteCommand(c, runAsUser, envVars, stdin)
}
// getUIDandGID takes a username and returns the uid and gid for that user.
// While testing UID/GID lookup for a user, it was found that the user.Lookup() function does not always return
// information for a new user on first boot. In the case that user.Lookup() fails, try dscacheutil, which has a
// higher success rate. If that fails, return an error. Any successful case returns the UID and GID as ints.
func getUIDandGID(username string) (uid int, gid int, err error) {
	var uidstr, gidstr string
	// Preference is user.Lookup(), if it works
	u, lookuperr := user.Lookup(username)
	if lookuperr != nil {
		// user.Lookup() has failed, second try by checking the DS cache
		out, cmderr := ExecuteCommand([]string{"dscacheutil", "-q", "user", "-a", "name", username}, "", []string{}, nil)
		if cmderr != nil {
			// dscacheutil has failed with an error; report both lookup failures
			return 0, 0, fmt.Errorf("error while looking up user %s: \n"+
				"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil Stderr: %s\n",
				username, lookuperr, cmderr, out.Stderr)
		}
		// Check length of Stdout - dscacheutil returns nothing if user is not found
		if len(out.Stdout) > 0 { // dscacheutil has returned something
			// Command output from dscacheutil should look like:
			// name: ec2-user
			// password: ********
			// uid: 501
			// gid: 20
			// dir: /Users/ec2-user
			// shell: /bin/bash
			// gecos: ec2-user
			dsSplit := strings.Split(out.Stdout, "\n") // split on newline to separate uid and gid
			for _, e := range dsSplit {
				eSplit := strings.Fields(e) // split into fields to separate tag with id
				// Find UID and GID and set them; each expected line is "<tag>: <value>" (two fields)
				if strings.HasPrefix(e, "uid") {
					if len(eSplit) != 2 {
						// dscacheutil has returned some sort of weird output that can't be split
						return 0, 0, fmt.Errorf("error while splitting dscacheutil uid output for user %s: %s\n"+
							"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil Stderr: %s\n",
							username, out.Stdout, lookuperr, cmderr, out.Stderr)
					}
					uidstr = eSplit[1]
				} else if strings.HasPrefix(e, "gid") {
					if len(eSplit) != 2 {
						// dscacheutil has returned some sort of weird output that can't be split
						return 0, 0, fmt.Errorf("error while splitting dscacheutil gid output for user %s: %s\n"+
							"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil Stderr: %s\n",
							username, out.Stdout, lookuperr, cmderr, out.Stderr)
					}
					gidstr = eSplit[1]
				}
			}
		} else {
			// dscacheutil has returned nothing, user is not found
			return 0, 0, fmt.Errorf("user %s not found: \n"+
				"user.Lookup() error: %s \ndscacheutil error: %s\ndscacheutil Stderr: %s\n",
				username, lookuperr, cmderr, out.Stderr)
		}
	} else {
		// user.Lookup() was successful, use the returned UID/GID
		uidstr = u.Uid
		gidstr = u.Gid
	}
	// Convert UID and GID to int (both sources return them as strings)
	uid, err = strconv.Atoi(uidstr)
	if err != nil {
		return 0, 0, fmt.Errorf("error while converting UID to int: %s\n", err)
	}
	gid, err = strconv.Atoi(gidstr)
	if err != nil {
		return 0, 0, fmt.Errorf("error while converting GID to int: %s\n", err)
	}
	return uid, gid, nil
}
| 168 |
eks-anywhere | aws | Go | package main
import (
"os"
"github.com/aws/eks-anywhere/cmd/eks-a-tool/cmd"
)
// main runs the eks-a-tool root command, exiting 0 on success and -1 on error.
func main() {
	err := cmd.Execute()
	if err != nil {
		os.Exit(-1)
	}
	os.Exit(0)
}
| 15 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// cloudstackCmd is the parent "cloudstack" command group; it has no RunE of
// its own and only hosts subcommands.
var cloudstackCmd = &cobra.Command{
	Use:   "cloudstack",
	Short: "CloudStack commands",
	Long:  "Use eks-a-tool cloudstack to run cloudstack utilities",
}

// init attaches the cloudstack command group to the root command.
func init() {
	rootCmd.AddCommand(cloudstackCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// cloudstackRmCmd is the "cloudstack rm" command group; it only hosts
// removal subcommands.
var cloudstackRmCmd = &cobra.Command{
	Use:   "rm",
	Short: "CloudStack rm commands",
	Long:  "Use eks-a-tool cloudstack rm to run cloudstack rm utilities",
}

// init attaches the rm group under the cloudstack command.
func init() {
	cloudstackCmd.AddCommand(cloudstackRmCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/validations"
)
// dryRunFlag names the boolean flag that previews deletions without performing them.
const dryRunFlag = "dry-run"

// cloudstackRmVmsCmd removes the CloudStack VMs associated with a cluster name.
var cloudstackRmVmsCmd = &cobra.Command{
	Use:    "vms <cluster-name>",
	PreRun: prerunCmdBindFlags,
	Short:  "CloudStack rmvms command",
	Long:   "This command removes vms associated with a cluster name",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, err := validations.ValidateClusterNameArg(args)
		if err != nil {
			return err
		}
		// Return the cleanup error so cobra reports it and exits nonzero; the
		// previous log.Fatalf here bypassed RunE's error contract.
		return cleanup.CleanUpCloudstackTestResources(cmd.Context(), clusterName, viper.GetBool(dryRunFlag))
	},
}
// init wires the vms subcommand under "cloudstack rm", declares its dry-run
// flag, and binds the flag set into viper so RunE can read it.
func init() {
	var err error
	cloudstackRmCmd.AddCommand(cloudstackRmVmsCmd)
	cloudstackRmVmsCmd.Flags().Bool(dryRunFlag, false, "Dry run flag")
	err = viper.BindPFlags(cloudstackRmVmsCmd.Flags())
	if err != nil {
		log.Fatalf("Error initializing flags: %v", err)
	}
}
| 46 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// conformanceCmd is the parent "conformance" command group; it only hosts
// the download and test subcommands.
var conformanceCmd = &cobra.Command{
	Use:   "conformance",
	Short: "Conformance tests",
	Long:  "Use eks-a-tool conformance to run conformance tests",
}

// init attaches the conformance group to the root command.
func init() {
	rootCmd.AddCommand(conformanceCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/pkg/conformance"
)
// conformanceDownloadCmd downloads the conformance test suite.
var conformanceDownloadCmd = &cobra.Command{
	Use:   "download",
	Short: "Conformance download command",
	Long:  "This command downloads the conformance test suite",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Propagate the error so cobra reports it and exits nonzero; the
		// previous log.Fatalf here bypassed RunE's error contract.
		return conformance.Download()
	},
}
// init wires the download subcommand under conformance and binds its flag set
// into viper (the command currently declares no flags of its own).
func init() {
	conformanceCmd.AddCommand(conformanceDownloadCmd)
	err := viper.BindPFlags(conformanceDownloadCmd.Flags())
	if err != nil {
		log.Fatalf("Error initializing flags: %v", err)
	}
}
| 32 |
eks-anywhere | aws | Go | package cmd
import (
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/pkg/conformance"
)
// conformanceTestCmd runs the conformance test suite against the cluster
// context given as the first positional argument and logs the results.
var conformanceTestCmd = &cobra.Command{
	Use:   "test <cluster-context>",
	Short: "Conformance test command",
	Long:  "This command runs the conformance tests",
	// Let cobra validate the required argument instead of log.Fatalf inside RunE.
	Args: cobra.MinimumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		results, err := conformance.RunTests(cmd.Context(), args[0])
		if err != nil {
			// Return the error so cobra reports it and exits nonzero; the
			// previous log.Fatalf here bypassed RunE's error contract.
			return err
		}
		log.Printf("Conformance Test results:\n %v", results)
		return nil
	},
}
// init wires the test subcommand under conformance and binds its flag set
// into viper (the command currently declares no flags of its own).
func init() {
	conformanceCmd.AddCommand(conformanceTestCmd)
	err := viper.BindPFlags(conformanceTestCmd.Flags())
	if err != nil {
		log.Fatalf("Error initializing flags: %v", err)
	}
}
| 36 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"path"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
anywhere "github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd"
)
// fmTemplate is the Hugo front-matter block prepended to every generated
// markdown page; both %s verbs receive the page title (see filePrepender).
const fmTemplate = `---
title: "%s"
linkTitle: "%s"
---
`

// cmdDocPath is the output directory for generated docs, settable via --path.
var cmdDocPath string
// docgenCmd is the hidden "docgen" command that renders markdown reference
// pages for the eksctl-anywhere CLI tree.
var docgenCmd = &cobra.Command{
	Use:    "docgen",
	Short:  "Generate the documentation for the CLI commands",
	Long:   "Use eks-a-tool docgen to auto generate CLI commands documentation",
	Hidden: true,
	RunE:   docgenCmdRun,
}

// init declares the output-path flag and attaches docgen to the root command.
func init() {
	docgenCmd.Flags().StringVar(&cmdDocPath, "path", "./docs/content/en/docs/reference/eksctl", "Path to write the generated documentation to")
	rootCmd.AddCommand(docgenCmd)
}
// docgenCmdRun generates a markdown page for every command in the
// eksctl-anywhere tree under cmdDocPath, using filePrepender for front matter
// and linkHandler for cross-page links.
func docgenCmdRun(_ *cobra.Command, _ []string) error {
	anywhereRootCmd := anywhere.RootCmd()
	// Omit the "Auto generated by spf13/cobra" footer from every page.
	anywhereRootCmd.DisableAutoGenTag = true
	if err := doc.GenMarkdownTreeCustom(anywhereRootCmd, cmdDocPath, filePrepender, linkHandler); err != nil {
		// Wrap with %w (was %v) so callers can unwrap the underlying error.
		return fmt.Errorf("error generating markdown doc from eksctl-anywhere root cmd: %w", err)
	}
	return nil
}
// filePrepender builds the Hugo front matter injected at the top of each
// generated markdown page, deriving the title from the file's base name
// (extension stripped, underscores replaced by spaces).
func filePrepender(filename string) string {
	base := filepath.Base(filename)
	title := strings.ReplaceAll(strings.TrimSuffix(base, path.Ext(base)), "_", " ")
	return fmt.Sprintf(fmTemplate, title, title)
}
// linkHandler rewrites a generated page name into a relative site link:
// strip the extension and any parentheses, lowercase, and wrap as "../<name>/".
func linkHandler(name string) string {
	base := strings.TrimSuffix(name, path.Ext(name))
	base = strings.NewReplacer("(", "", ")", "").Replace(base)
	return "../" + strings.ToLower(base) + "/"
}
| 59 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// nutanixCmd is the parent "nutanix" command group; it only hosts subcommands.
var nutanixCmd = &cobra.Command{
	Use:   "nutanix",
	Short: "Nutanix commands",
	Long:  "Use eks-a-tool nutanix to run nutanix utilities",
}

// init attaches the nutanix command group to the root command.
func init() {
	rootCmd.AddCommand(nutanixCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// nutanixRmCmd is the "nutanix rm" command group; it only hosts removal
// subcommands.
var nutanixRmCmd = &cobra.Command{
	Use:   "rm",
	Short: "Nutanix rm commands",
	Long:  "Use eks-a-tool nutanix rm to run nutanix rm utilities",
}

// init attaches the rm group under the nutanix command.
func init() {
	nutanixCmd.AddCommand(nutanixRmCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/internal/test/cleanup"
"github.com/aws/eks-anywhere/pkg/validations"
)
// Flag names for the "nutanix rm vms" command.
const (
	endpointFlag     = "endpoint"
	portFlag         = "port"
	insecureFlag     = "insecure"
	ignoreErrorsFlag = "ignoreErrors"
)
// nutanixRmVmsCmd removes the Nutanix VMs associated with the given cluster
// name via the Prism API.
var nutanixRmVmsCmd = &cobra.Command{
	Use:    "vms <cluster-name>",
	PreRun: prerunCmdBindFlags,
	Short:  "Nutanix rmvms command",
	Long:   "This command removes vms associated with a cluster name",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, err := validations.ValidateClusterNameArg(args)
		if err != nil {
			return err
		}
		// Bug fix: honor the flag's value. Previously only viper.IsSet was
		// consulted, so any set value — including an explicit "-k false" —
		// enabled insecure mode.
		insecure := viper.GetBool(insecureFlag)
		err = cleanup.NutanixTestResourcesCleanup(cmd.Context(), clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), insecure, viper.GetBool(ignoreErrorsFlag))
		if err != nil {
			// log.Fatalf exits the process; the return below is unreachable.
			log.Fatalf("Error removing vms: %v", err)
		}
		return nil
	},
}
// init wires the vms subcommand under nutanix rm and declares its flags;
// only --endpoint is mandatory.
func init() {
	nutanixRmCmd.AddCommand(nutanixRmVmsCmd)
	nutanixRmVmsCmd.Flags().StringP(endpointFlag, "e", "", "specify Nutanix Prism endpoint (REQUIRED)")
	nutanixRmVmsCmd.Flags().StringP(portFlag, "p", "9440", "specify Nutanix Prism port (default: 9440)")
	// NOTE(review): insecure/ignoreErrors are string flags holding booleans;
	// Bool flags would reject invalid values at parse time — confirm before changing.
	nutanixRmVmsCmd.Flags().StringP(insecureFlag, "k", "false", "skip TLS when contacting Prism APIs (default: false)")
	nutanixRmVmsCmd.Flags().String(ignoreErrorsFlag, "true", "ignore APIs errors when deleting VMs (default: true)")
	if err := nutanixRmVmsCmd.MarkFlagRequired(endpointFlag); err != nil {
		log.Fatalf("Marking flag '%s' as required", endpointFlag)
	}
}
| 54 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/logger"
)
// rootCmd is the top-level eks-a-tool command; all subcommands hang off it.
var rootCmd = &cobra.Command{
	Use:              "eks-a-tool",
	Short:            "Amazon EKS Anywhere Tool",
	Long:             `Use eks-a-tool to validate your cluster`,
	PersistentPreRun: rootPersistentPreRun,
}

// init binds the global --verbosity/-v flag into viper for all subcommands.
func init() {
	rootCmd.PersistentFlags().IntP("verbosity", "v", 0, "Set the log level verbosity")
	if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil {
		log.Fatalf("failed to bind flags for root: %v", err)
	}
}
// prerunCmdBindFlags copies every flag on cmd into viper so command handlers
// can read values with the viper getters; it aborts the process if a binding
// fails.
func prerunCmdBindFlags(cmd *cobra.Command, args []string) {
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		if err := viper.BindPFlag(f.Name, f); err != nil {
			log.Fatalf("Error initializing flags: %v", err)
		}
	})
}
// rootPersistentPreRun initializes logging before any subcommand runs.
func rootPersistentPreRun(cmd *cobra.Command, args []string) {
	if err := initLogger(); err != nil {
		log.Fatal(err)
	}
}
// initLogger configures the logger at the verbosity selected with the global
// -v flag.
func initLogger() error {
	opts := logger.Options{Level: viper.GetInt("verbosity")}
	if err := logger.Init(opts); err != nil {
		return fmt.Errorf("failed init zap logger in root command: %v", err)
	}
	return nil
}
// Execute runs the eks-a-tool root command and returns its error, if any.
func Execute() error {
	return rootCmd.Execute()
}
| 56 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
// uniqueIpCmd prints a random unused IP from the --cidr range, intended for
// use as a control plane endpoint address.
var uniqueIpCmd = &cobra.Command{
	Use:    "unique-ip",
	Short:  "Unique IP",
	Long:   "Generate a random unique IP to be used for control plane endpoint ip",
	PreRun: preRunUniqueIp,
	RunE: func(cmd *cobra.Command, args []string) error {
		uniqueIp, err := generateUniqueIP(cmd.Context())
		if err != nil {
			// log.Fatalf exits the process; the return below is unreachable.
			log.Fatalf("Error generating unique ip: %v", err)
		}
		fmt.Println(uniqueIp)
		return nil
	},
}

// init registers unique-ip on the root command; --cidr is mandatory.
func init() {
	rootCmd.AddCommand(uniqueIpCmd)
	uniqueIpCmd.Flags().StringP("cidr", "c", "", "CIDR range for the unique IP")
	err := uniqueIpCmd.MarkFlagRequired("cidr")
	if err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
}
// preRunUniqueIp binds every flag of the command into viper before execution.
// It is behaviorally identical to prerunCmdBindFlags (root.go) and now simply
// delegates to it instead of maintaining a second copy of the same loop.
func preRunUniqueIp(cmd *cobra.Command, args []string) {
	prerunCmdBindFlags(cmd, args)
}
func generateUniqueIP(ctx context.Context) (string, error) {
cidr := viper.GetString("cidr")
ipgen := networkutils.NewIPGenerator(&networkutils.DefaultNetClient{})
return ipgen.GenerateUniqueIP(cidr)
}
| 53 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
// validateClusterCmd checks the health of an existing EKS-A cluster given its
// name and a kubeconfig path; any validation failure aborts the process.
var validateClusterCmd = &cobra.Command{
	Use:   "validate-cluster <cluster-name> <kubeconfig>",
	Short: "Validate eks-a cluster command",
	Long:  "Use eks-a-tool validate eks-anywhere cluster",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) < 2 {
			// log.Fatalf exits immediately, so the args[1] access below is safe.
			log.Fatalf("Some args are missing. See usage for required arguments")
		}
		clusterName, err := validations.ValidateClusterNameArg(args)
		if err != nil {
			log.Fatalf("Error validating the cluster: %v", err)
		}
		kubeconfig := args[1]
		if !validations.FileExists(kubeconfig) {
			log.Fatalf("Error validating the cluster: kubeconfig file %s not found", kubeconfig)
		}
		cluster := &types.Cluster{
			Name:           clusterName,
			KubeconfigFile: kubeconfig,
		}
		err = validateCluster(cmd.Context(), cluster, clusterName)
		if err != nil {
			log.Fatalf("Error validating the cluster: %v", err)
		}
		return nil
	},
}

func init() {
	rootCmd.AddCommand(validateClusterCmd)
}
// validateCluster runs kubectl-based health checks (nodes, control plane,
// workers, pods) against the given cluster using the in-docker executables.
func validateCluster(ctx context.Context, cluster *types.Cluster, clusterName string) error {
	builder, closer, err := executables.InitInDockerExecutablesBuilder(ctx, executables.DefaultEksaImage())
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}
	defer closer.CheckErr(ctx)
	kubectl := builder.BuildKubectlExecutable()
	if err := kubectl.ValidateNodes(ctx, cluster.KubeconfigFile); err != nil {
		return err
	}
	if err := kubectl.ValidateControlPlaneNodes(ctx, cluster, clusterName); err != nil {
		return err
	}
	if err := kubectl.ValidateWorkerNodes(ctx, clusterName, cluster.KubeconfigFile); err != nil {
		return err
	}
	return kubectl.ValidatePods(ctx, cluster.KubeconfigFile)
}
| 68 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/executables"
)
// versionsCmd prints the versions of the images deployed in the cluster.
var versionsCmd = &cobra.Command{
	Use:   "versions",
	Short: "Get cluster versions",
	Long:  "Get the versions of images in cluster",
	RunE: func(cmd *cobra.Command, args []string) error {
		err := versions(cmd.Context())
		if err != nil {
			// log.Fatalf exits the process; the return below is unreachable.
			log.Fatalf("Error getting image versions: %v", err)
		}
		return nil
	},
}

func init() {
	rootCmd.AddCommand(versionsCmd)
}
// versions lists the images running in the cluster via kubectl, using the
// in-docker executables builder.
func versions(ctx context.Context) error {
	builder, closer, err := executables.InitInDockerExecutablesBuilder(ctx, executables.DefaultEksaImage())
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}
	defer closer.CheckErr(ctx)
	return builder.BuildKubectlExecutable().ListCluster(ctx)
}
| 40 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// vsphereCmd groups the vSphere-specific utility subcommands.
var vsphereCmd = &cobra.Command{
	Use:   "vsphere",
	Short: "VSphere commands",
	Long:  "Use eks-a-tool vsphere to run vsphere utilities",
}

// init attaches the vsphere command group to the root command.
func init() {
	rootCmd.AddCommand(vsphereCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/validations"
)
// autofillCmd fills the vSphere provider config with values taken from
// environment variables.
var autofillCmd = &cobra.Command{
	Use:    "autofill",
	Short:  "Autofill provider config",
	Long:   "Fills provider config with values set in environment variables",
	PreRun: preRunAutofill,
	RunE: func(cmd *cobra.Command, args []string) error {
		err := autofill(cmd.Context())
		if err != nil {
			// log.Fatalf exits the process; the return below is unreachable.
			log.Fatalf("Error filling the provider config: %v", err)
		}
		return nil
	},
}

// init registers autofill under the vsphere group; --filename is mandatory.
func init() {
	vsphereCmd.AddCommand(autofillCmd)
	autofillCmd.Flags().StringP("filename", "f", "", "Cluster config yaml filepath")
	err := autofillCmd.MarkFlagRequired("filename")
	if err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
}
// preRunAutofill binds every flag of the command into viper before execution.
// It is behaviorally identical to prerunCmdBindFlags (root.go) and now simply
// delegates to it instead of maintaining a second copy of the same loop.
func preRunAutofill(cmd *cobra.Command, args []string) {
	prerunCmdBindFlags(cmd, args)
}
// autofill reads the cluster config named by --filename, overwrites selected
// vSphere provider fields from environment variables, and writes the combined
// cluster/datacenter/machine-config manifests back to the same file.
func autofill(ctx context.Context) error {
	clusterConfigFileName := viper.GetString("filename")
	clusterConfigFileExist := validations.FileExists(clusterConfigFileName)
	if !clusterConfigFileExist {
		return fmt.Errorf("the cluster config file %s does not exist", clusterConfigFileName)
	}
	clusterConfig, err := v1alpha1.GetAndValidateClusterConfig(clusterConfigFileName)
	if err != nil {
		return fmt.Errorf("unable to get cluster config from file: %v", err)
	}
	datacenterConfig, err := v1alpha1.GetVSphereDatacenterConfig(clusterConfigFileName)
	if err != nil {
		return fmt.Errorf("unable to get datacenter config from file: %v", err)
	}
	machineConfig, err := v1alpha1.GetVSphereMachineConfigs(clusterConfigFileName)
	if err != nil {
		return fmt.Errorf("unable to get machine config from file: %v", err)
	}
	controlPlaneMachineConfig := machineConfig[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
	workerMachineConfig := machineConfig[clusterConfig.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name]
	var updatedFields []string
	// updateField copies a non-empty env var into the target string field.
	updateField := func(envName string, field *string) {
		if value, set := os.LookupEnv(envName); set && len(value) > 0 {
			*field = value
			updatedFields = append(updatedFields, envName)
		}
	}
	// updateFieldInt is the integer variant. Bug fix: the strconv.Atoi error
	// was previously discarded, so a non-numeric value silently wrote 0; it is
	// now reported and skipped.
	updateFieldInt := func(envName string, field *int) {
		if value, set := os.LookupEnv(envName); set && len(value) > 0 {
			val, err := strconv.Atoi(value)
			if err != nil {
				log.Printf("Ignoring %s: %q is not a valid integer: %v", envName, value, err)
				return
			}
			*field = val
			updatedFields = append(updatedFields, envName)
		}
	}
	tlsInsecure := strconv.FormatBool(datacenterConfig.Spec.Insecure)
	updateField("CONTROL_PLANE_ENDPOINT_IP", &clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host)
	updateField("DATACENTER", &datacenterConfig.Spec.Datacenter)
	updateField("NETWORK", &datacenterConfig.Spec.Network)
	updateField("SERVER", &datacenterConfig.Spec.Server)
	updateField("INSECURE", &tlsInsecure)
	// Bug fix: INSECURE was read into the local tlsInsecure string but never
	// written back, so the env var had no effect on the generated config.
	if insecure, err := strconv.ParseBool(tlsInsecure); err == nil {
		datacenterConfig.Spec.Insecure = insecure
	} else {
		log.Printf("Ignoring INSECURE: %q is not a valid boolean: %v", tlsInsecure, err)
	}
	updateField("THUMBPRINT", &datacenterConfig.Spec.Thumbprint)
	updateFieldInt("CONTROL_PLANE_COUNT", &clusterConfig.Spec.ControlPlaneConfiguration.Count)
	updateFieldInt("WORKER_NODE_COUNT", clusterConfig.Spec.WorkerNodeGroupConfigurations[0].Count)
	updateField("SSH_AUTHORIZED_KEY", &controlPlaneMachineConfig.Spec.Users[0].SshAuthorizedKeys[0])
	updateField("SSH_USERNAME", &controlPlaneMachineConfig.Spec.Users[0].Name)
	updateField("TEMPLATE", &controlPlaneMachineConfig.Spec.Template)
	updateField("DATASTORE", &controlPlaneMachineConfig.Spec.Datastore)
	updateField("FOLDER", &controlPlaneMachineConfig.Spec.Folder)
	updateField("RESOURCE_POOL", &controlPlaneMachineConfig.Spec.ResourcePool)
	updateField("STORAGE_POLICY_NAME", &controlPlaneMachineConfig.Spec.StoragePolicyName)
	updateField("SSH_AUTHORIZED_KEY", &workerMachineConfig.Spec.Users[0].SshAuthorizedKeys[0])
	updateField("SSH_USERNAME", &workerMachineConfig.Spec.Users[0].Name)
	updateField("TEMPLATE", &workerMachineConfig.Spec.Template)
	updateField("DATASTORE", &workerMachineConfig.Spec.Datastore)
	updateField("FOLDER", &workerMachineConfig.Spec.Folder)
	updateField("RESOURCE_POOL", &workerMachineConfig.Spec.ResourcePool)
	updateField("STORAGE_POLICY_NAME", &workerMachineConfig.Spec.StoragePolicyName)
	clusterOutput, err := yaml.Marshal(clusterConfig)
	if err != nil {
		return fmt.Errorf("outputting yaml: %v", err)
	}
	datacenterOutput, err := yaml.Marshal(datacenterConfig)
	if err != nil {
		return fmt.Errorf("outputting yaml: %v", err)
	}
	controlPlaneMachineOutput, err := yaml.Marshal(controlPlaneMachineConfig)
	if err != nil {
		return fmt.Errorf("outputting yaml: %v", err)
	}
	workerMachineOutput, err := yaml.Marshal(workerMachineConfig)
	if err != nil {
		return fmt.Errorf("outputting yaml: %v", err)
	}
	// Strip empty provider stubs that yaml.Marshal emits for unset structs.
	result := strings.ReplaceAll(string(datacenterOutput), " aws: {}\n", "")
	result = strings.ReplaceAll(result, " vsphere: {}\n", "")
	result = string(clusterOutput) + "\n---\n" + result + "\n---\n" + string(controlPlaneMachineOutput) + "\n---\n" + string(workerMachineOutput)
	writer, err := filewriter.NewWriter(filepath.Dir(clusterConfig.Name))
	if err != nil {
		return err
	}
	_, err = writer.Write(filepath.Base(clusterConfig.Name), []byte(result))
	if err != nil {
		return fmt.Errorf("writing to file %s: %v", clusterConfig.Name, err)
	}
	fmt.Printf("The following fields were updated: %v\n", updatedFields)
	return nil
}
| 147 |
eks-anywhere | aws | Go | package cmd
import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)
// sessions holds the session tokens passed via --sessionTokens.
var sessions []string

// vsphereSessionRmCommand logs the provided session tokens out of vSphere.
var vsphereSessionRmCommand = &cobra.Command{
	Use:    "sessions",
	Short:  "vsphere logout sessions command",
	Long:   "This command logs out all of the provided VSphere user sessions ",
	PreRun: prerunCmdBindFlags,
	RunE: func(cmd *cobra.Command, args []string) error {
		err := vsphereLogoutSessions(cmd.Context(), sessions)
		if err != nil {
			// log.Fatalf exits the process; the return below is unreachable.
			log.Fatalf("Error removing sessions: %v", err)
		}
		return nil
	},
}

// Flag names for the vsphere rm sessions command.
const (
	sessionTokensFlag      = "sessionTokens"
	tlsInsecureFlag        = "tlsInsecure"
	vsphereApiEndpointFlag = "vsphereApiEndpoint"
)
// init wires the sessions subcommand under vsphere rm and declares its flags;
// both --vsphereApiEndpoint and --sessionTokens are mandatory.
func init() {
	vsphereRmCmd.AddCommand(vsphereSessionRmCommand)
	vsphereSessionRmCommand.Flags().StringSliceVarP(&sessions, sessionTokensFlag, "s", []string{}, "sessions to logout")
	vsphereSessionRmCommand.Flags().Bool(tlsInsecureFlag, false, "if endpoint is tls secure or not")
	vsphereSessionRmCommand.Flags().StringP(vsphereApiEndpointFlag, "e", "", "the URL of the vsphere API endpoint")
	err := vsphereSessionRmCommand.MarkFlagRequired(vsphereApiEndpointFlag)
	if err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
	// Use the declared constant rather than a duplicate string literal so the
	// flag name cannot silently drift from its definition.
	err = vsphereSessionRmCommand.MarkFlagRequired(sessionTokensFlag)
	if err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
}
// vsphereLogoutSessions attempts to log every provided session token out of
// vSphere, collecting per-session failures and reporting them all at once.
func vsphereLogoutSessions(_ context.Context, sessions []string) error {
	failed := map[string]error{}
	for _, s := range sessions {
		if err := logoutSession(s); err != nil {
			failed[s] = err
		}
	}
	if len(failed) == 0 {
		return nil
	}
	for session, err := range failed {
		log.Printf("failed to log out session %s: %v", session, err)
	}
	return fmt.Errorf("failed to log %d sessions out of vsphere: %v", len(failed), failed)
}
// logoutSession terminates one vSphere SOAP session by POSTing a Logout call
// to the SessionManager endpoint with the session token attached as a cookie.
// A session that is already logged out is treated as success.
func logoutSession(session string) error {
	log.Printf("logging out of session %s", session)
	sessionLogoutPayload := []byte(strings.TrimSpace(`
<?xml version="1.0" encoding="UTF-8"?><Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/">
<Body>
<Logout xmlns="urn:vim25">
<_this type="SessionManager">SessionManager</_this>
</Logout>
</Body>
</Envelope>`,
	))
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: viper.GetBool(tlsInsecureFlag)},
	}
	// Bound the request: the zero-value http.Client has no timeout and could
	// hang this cleanup path indefinitely against an unresponsive endpoint.
	client := &http.Client{Transport: tr, Timeout: 30 * time.Second}
	url := fmt.Sprintf("%s/sdk", viper.GetString(vsphereApiEndpointFlag))
	req, err := http.NewRequest("POST", url, bytes.NewReader(sessionLogoutPayload))
	if err != nil {
		return err
	}
	const sessionCookieKey = "vmware_soap_session"
	cookie := http.Cookie{Name: sessionCookieKey, Value: session}
	req.AddCookie(&cookie)
	req.Header.Set("Content-Type", "text/xml")
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	bodyString := string(bodyBytes)
	// vSphere reports a logout of an unauthenticated session as a 500 with
	// this fault string; that case is not an error for our purposes.
	sessionNotAuthenticatedFault := "The session is not authenticated."
	if resp.StatusCode == 500 && strings.Contains(bodyString, sessionNotAuthenticatedFault) {
		log.Printf("Can't logout session %s, it's not logged in", session)
		return nil
	}
	// NOTE(review): >= 499 treats most 4xx responses as success; confirm
	// whether this threshold should be >= 400.
	if resp.StatusCode >= 499 {
		log.Printf("failed to log out of vsphere session %s: %v", session, bodyString)
		return fmt.Errorf("failed to log out of vsphere session %s: %v", session, bodyString)
	}
	log.Printf("Successfully logged out of vsphere session %s", session)
	return nil
}
| 126 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// vsphereRmCmd groups the vSphere removal subcommands (vms, sessions).
var vsphereRmCmd = &cobra.Command{
	Use:   "rm",
	Short: "VSphere rm commands",
	Long:  "Use eks-a-tool vsphere rm to run vsphere rm utilities",
}

// init attaches rm under the vsphere command group.
func init() {
	vsphereCmd.AddCommand(vsphereRmCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/validations"
)
// vsphereRmVmsCmd deletes the vSphere VMs belonging to the named cluster;
// with --dry-run it only prints what would be removed.
var vsphereRmVmsCmd = &cobra.Command{
	Use:   "vms <cluster-name>",
	Short: "VSphere rmvms command",
	Long:  "This command removes vms associated with a cluster name",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, err := validations.ValidateClusterNameArg(args)
		if err != nil {
			return err
		}
		if err := vsphereRmVms(cmd.Context(), clusterName, viper.GetBool("dry-run")); err != nil {
			log.Fatalf("Error removing vms: %v", err)
		}
		return nil
	},
}
// init registers the vms subcommand under vsphere rm and binds its flags into
// viper.
func init() {
	vsphereRmCmd.AddCommand(vsphereRmVmsCmd)
	vsphereRmVmsCmd.Flags().Bool("dry-run", false, "Dry run flag")
	if err := viper.BindPFlags(vsphereRmVmsCmd.Flags()); err != nil {
		log.Fatalf("Error initializing flags: %v", err)
	}
}
// vsphereRmVms deletes (or, with dryRun, only lists) the vSphere VMs that
// belong to the named cluster, using govc via the in-docker executables.
func vsphereRmVms(ctx context.Context, clusterName string, dryRun bool) error {
	executableBuilder, closer, err := executables.InitInDockerExecutablesBuilder(ctx, executables.DefaultEksaImage())
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}
	defer closer.CheckErr(ctx)
	// Bug fix: the writer error was previously discarded, letting govc run
	// with an unusable writer on failure.
	tmpWriter, err := filewriter.NewWriter("rmvms")
	if err != nil {
		return fmt.Errorf("creating temp file writer: %v", err)
	}
	govc := executableBuilder.BuildGovcExecutable(tmpWriter)
	defer govc.Close(ctx)
	return govc.CleanupVms(ctx, clusterName, dryRun)
}
| 60 |
eks-anywhere | aws | Go | package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd"
"github.com/aws/eks-anywhere/pkg/eksctl"
"github.com/aws/eks-anywhere/pkg/logger"
)
// main wires up interrupt handling, optionally validates the eksctl wrapper
// version, and dispatches to the root command, exiting non-zero on failure.
func main() {
	interrupts := make(chan os.Signal, 1)
	signal.Notify(interrupts, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-interrupts
		logger.Info("Warning: Terminating this operation may leave the cluster in an irrecoverable state")
		os.Exit(-1)
	}()
	if eksctl.Enabled() {
		if err := eksctl.ValidateVersion(); err != nil {
			fmt.Println(err)
			os.Exit(-1)
		}
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(-1)
	}
	os.Exit(0)
}
| 34 |
eks-anywhere | aws | Go | package cmd
import "github.com/spf13/cobra"
// applyCmd groups the apply subcommands (e.g. `apply packages`).
var applyCmd = &cobra.Command{
	Use:   "apply",
	Short: "Apply resources",
	Long:  "Use eksctl anywhere apply to apply resources",
}

func init() {
	rootCmd.AddCommand(applyCmd)
}
| 14 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// applyPackageOptions collects the flag values for `apply packages`.
type applyPackageOptions struct {
	// fileName is the manifest of curated-packages custom resources to apply.
	fileName string
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig string
	// bundlesOverride optionally replaces the default Bundles manifest.
	bundlesOverride string
}

var apo = &applyPackageOptions{}

// init registers `apply packages` and its flags; --filename is mandatory.
func init() {
	applyCmd.AddCommand(applyPackagesCommand)
	applyPackagesCommand.Flags().StringVarP(&apo.fileName, "filename", "f",
		"", "Filename that contains curated packages custom resources to apply")
	applyPackagesCommand.Flags().StringVar(&apo.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file to use.")
	applyPackagesCommand.Flags().StringVar(&apo.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	err := applyPackagesCommand.MarkFlagRequired("filename")
	if err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
}
// applyPackagesCommand applies curated-packages custom resources from the
// manifest given with --filename to the target cluster.
var applyPackagesCommand = &cobra.Command{
	Use:          "package(s) [flags]",
	Short:        "Apply curated packages",
	Long:         "Apply Curated Packages Custom Resources to the cluster",
	Aliases:      []string{"package", "packages"},
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		// applyPackages already returns a suitable error; no extra wrapping needed.
		return applyPackages(cmd.Context())
	},
}
// applyPackages resolves the kubeconfig, builds the curated-packages
// dependencies, and applies the resources from the --filename manifest.
func applyPackages(ctx context.Context) error {
	kubeConfig, err := kubeconfig.ResolveAndValidateFilename(apo.kubeConfig, "")
	if err != nil {
		return err
	}
	deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithBundlesOverride(apo.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}
	packages := curatedpackages.NewPackageClient(deps.Kubectl)
	curatedpackages.PrintLicense()
	return packages.ApplyPackages(ctx, apo.fileName, kubeConfig)
}
| 77 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/version"
)
// checkImagesOptions collects the flag values for the check-images command.
type checkImagesOptions struct {
	// fileName is the EKS-A cluster configuration to read images from.
	fileName string
}

var cio = &checkImagesOptions{}

// init registers check-images on the root command; --filename is mandatory.
func init() {
	rootCmd.AddCommand(checkImagesCommand)
	checkImagesCommand.Flags().StringVarP(&cio.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
	err := checkImagesCommand.MarkFlagRequired("filename")
	if err != nil {
		log.Fatalf("Error marking filename flag as required: %v", err)
	}
}
// checkImagesCommand verifies that all images referenced by a cluster config
// exist in the target registry.
var checkImagesCommand = &cobra.Command{
	Use:   "check-images",
	Short: "Check images used by EKS Anywhere do exist in the target registry",
	Long:  "This command is used to check images used by EKS-Anywhere for cluster provisioning do exist in the target registry",
	PreRunE: func(cmd *cobra.Command, args []string) error {
		// Bind all flags into viper so handlers can read them uniformly.
		cmd.Flags().VisitAll(func(flag *pflag.Flag) {
			if err := viper.BindPFlag(flag.Name, flag); err != nil {
				log.Fatalf("Error initializing flags: %v", err)
			}
		})
		return nil
	},
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return checkImages(cmd.Context(), cio.fileName)
	},
}
// checkImages verifies that every image referenced by the cluster spec at
// clusterSpecPath exists in the (possibly mirrored) target registry, logging
// a pass/fail marker per image URI. The first parameter was renamed from
// `context` to `ctx` — it shadowed the context package.
func checkImages(ctx context.Context, clusterSpecPath string) error {
	images, err := getImages(clusterSpecPath, "")
	if err != nil {
		return err
	}
	clusterSpec, err := readAndValidateClusterSpec(clusterSpecPath, version.Get())
	if err != nil {
		return err
	}
	// Resolve the registry mirror once; it is invariant across images.
	mirror := registrymirror.FromCluster(clusterSpec.Cluster)
	checkImageExistence := artifacts.CheckImageExistence{}
	for _, image := range images {
		myImageURI := mirror.ReplaceRegistry(image.URI)
		checkImageExistence.ImageUri = myImageURI
		if err = checkImageExistence.Run(ctx); err != nil {
			fmt.Println(err.Error())
			logger.MarkFail(myImageURI)
		} else {
			logger.MarkPass(myImageURI)
		}
	}
	return nil
}
| 76 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"os"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
)
// cleanup removes the dependencies' temp artifacts, but only when the command
// succeeded: on error the temp files are kept (presumably for debugging).
func cleanup(deps *dependencies.Dependencies, commandErr *error) {
	if *commandErr == nil {
		deps.Writer.CleanUpTemp()
	}
}

// close closes the given closer and logs (rather than returns) any failure.
// NOTE(review): the name shadows the builtin close; renaming would touch
// every caller, so it is left as-is.
func close(ctx context.Context, closer types.Closer) {
	if err := closer.Close(ctx); err != nil {
		logger.Error(err, "Closer failed", "closerType", fmt.Sprintf("%T", closer))
	}
}
func cleanupDirectory(directory string) {
if _, err := os.Stat(directory); err == nil {
os.RemoveAll(directory)
}
}
| 30 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/version"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// getImages returns every container image referenced by the cluster spec at
// clusterSpecPath, optionally resolving bundles from bundlesOverride instead
// of the default manifest.
func getImages(clusterSpecPath, bundlesOverride string) ([]v1alpha1.Image, error) {
	var opts []cluster.FileSpecBuilderOpt
	if bundlesOverride != "" {
		opts = append(opts, cluster.WithOverrideBundlesManifest(bundlesOverride))
	}
	spec, err := readAndValidateClusterSpec(clusterSpecPath, version.Get(), opts...)
	if err != nil {
		return nil, err
	}
	return append(spec.VersionsBundle.Images(), spec.KubeDistroImages()...), nil
}
// getKubeconfigPath returns an EKS-A kubeconfig path. The return can be
// overridden using override to give preference to a user specified kubeconfig.
func getKubeconfigPath(clusterName, override string) string {
	if override == "" {
		return kubeconfig.FromClusterName(clusterName)
	}
	return override
}
// NewDependenciesForPackages builds the dependency set (executables, manifest
// reader, kubectl, helm, curated-packages registry, package controller
// client) used by the curated-packages commands, configured via opts.
func NewDependenciesForPackages(ctx context.Context, opts ...PackageOpt) (*dependencies.Dependencies, error) {
	config := New(opts...)
	return dependencies.NewFactory().
		WithExecutableMountDirs(config.mountPaths...).
		WithCustomBundles(config.bundlesOverride).
		WithExecutableBuilder().
		WithManifestReader().
		WithKubectl().
		WithHelm(executables.WithInsecure()).
		WithCuratedPackagesRegistry(config.registryName, config.kubeVersion, version.Get()).
		WithPackageControllerClient(config.spec, config.kubeConfig).
		WithLogger().
		Build(ctx)
}

// PackageOpt mutates a PackageConfig; used with New.
type PackageOpt func(*PackageConfig)

// PackageConfig carries the optional settings consumed by
// NewDependenciesForPackages.
type PackageConfig struct {
	registryName    string   // curated-packages registry name
	kubeVersion     string   // target Kubernetes version
	kubeConfig      string   // kubeconfig path for the package controller client
	mountPaths      []string // extra paths mounted for the executables
	spec            *cluster.Spec
	bundlesOverride string // alternative Bundles manifest path
}
// New builds a PackageConfig by applying each option to a zero config.
func New(options ...PackageOpt) *PackageConfig {
	pc := &PackageConfig{}
	for _, o := range options {
		o(pc)
	}
	return pc
}

// WithRegistryName sets the curated-packages registry name.
func WithRegistryName(registryName string) func(*PackageConfig) {
	return func(config *PackageConfig) {
		config.registryName = registryName
	}
}

// WithKubeVersion sets the target Kubernetes version.
func WithKubeVersion(kubeVersion string) func(*PackageConfig) {
	return func(config *PackageConfig) {
		config.kubeVersion = kubeVersion
	}
}

// WithMountPaths sets extra paths to mount for the executables.
func WithMountPaths(mountPaths ...string) func(*PackageConfig) {
	return func(config *PackageConfig) {
		config.mountPaths = mountPaths
	}
}

// WithClusterSpec sets the cluster spec passed to the package controller
// client.
func WithClusterSpec(spec *cluster.Spec) func(config *PackageConfig) {
	return func(config *PackageConfig) {
		config.spec = spec
	}
}

// WithKubeConfig sets the kubeconfig path for the package controller client.
func WithKubeConfig(kubeConfig string) func(*PackageConfig) {
	return func(config *PackageConfig) {
		config.kubeConfig = kubeConfig
	}
}

// WithBundlesOverride sets bundlesOverride in the config with incoming value.
func WithBundlesOverride(bundlesOverride string) func(*PackageConfig) {
	return func(config *PackageConfig) {
		config.bundlesOverride = bundlesOverride
	}
}
| 107 |
eks-anywhere | aws | Go | package cmd
// Shared file names and flag names used across the cluster lifecycle commands.
const (
	imagesTarFile               = "images.tar"
	eksaToolsImageTarFile       = "tools-image.tar"
	cpWaitTimeoutFlag           = "control-plane-wait-timeout"
	externalEtcdWaitTimeoutFlag = "external-etcd-wait-timeout"
	perMachineWaitTimeoutFlag   = "per-machine-wait-timeout"
	unhealthyMachineTimeoutFlag = "unhealthy-machine-timeout"
	nodeStartupTimeoutFlag      = "node-startup-timeout"
	noTimeoutsFlag              = "no-timeouts"
)
// Operation enumerates the high-level cluster lifecycle actions; the numeric
// values are stable and must not be reordered.
type Operation int

const (
	// Create provisions a new cluster.
	Create Operation = iota
	// Upgrade moves an existing cluster to a new spec.
	Upgrade
	// Delete tears a cluster down.
	Delete
)
| 21 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// copyCmd represents the copy command, grouping the copy subcommands.
// (The previous comment referred to a nonexistent importCmd.)
var copyCmd = &cobra.Command{
	Use:   "copy",
	Short: "Copy resources",
	Long:  "Copy EKS Anywhere resources and artifacts",
}

func init() {
	rootCmd.AddCommand(copyCmd)
}
| 17 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"os"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/registry"
)
// copyPackagesCmd is the context for the copy packages command.
var copyPackagesCmd = &cobra.Command{
Use: "packages <destination-registry>",
Short: "Copy curated package images and charts from a source to a destination",
Long: `Copy all the EKS Anywhere curated package images and helm charts from a source to a destination.`,
SilenceUsage: true,
RunE: runCopyPackages,
Args: func(cmd *cobra.Command, args []string) error {
if err := cobra.ExactArgs(1)(cmd, args); err != nil {
return fmt.Errorf("A destination must be specified as an argument")
}
return nil
},
}
func init() {
copyCmd.AddCommand(copyPackagesCmd)
copyPackagesCmd.Flags().StringVarP(©PackagesCommand.bundleFile, "bundle", "b", "", "EKS-A bundle file to read artifact dependencies from")
if err := copyPackagesCmd.MarkFlagRequired("bundle"); err != nil {
log.Fatalf("Cannot mark 'bundle' flag as required: %s", err)
}
copyPackagesCmd.Flags().StringVarP(©PackagesCommand.dstCert, "dst-cert", "", "", "TLS certificate for destination registry")
copyPackagesCmd.Flags().StringVarP(©PackagesCommand.srcCert, "src-cert", "", "", "TLS certificate for source registry")
copyPackagesCmd.Flags().BoolVar(©PackagesCommand.insecure, "insecure", false, "Skip TLS verification while copying images and charts")
copyPackagesCmd.Flags().BoolVar(©PackagesCommand.dryRun, "dry-run", false, "Dry run copy to print images that would be copied")
copyPackagesCmd.Flags().StringVarP(©PackagesCommand.awsRegion, "aws-region", "", os.Getenv(config.EksaRegionEnv), "Region to copy images from")
}
var copyPackagesCommand = CopyPackagesCommand{}

// CopyPackagesCommand copies packages specified in a bundle to a destination.
type CopyPackagesCommand struct {
	destination   string // destination registry (first CLI argument)
	bundleFile    string // EKS-A bundle manifest to read artifacts from
	srcCert       string // TLS certificate for the source registry
	dstCert       string // TLS certificate for the destination registry
	insecure      bool   // skip TLS verification while copying
	dryRun        bool   // print copies without performing them
	awsRegion     string // region to copy images from
	registryCache *registry.Cache
}
// runCopyPackages is the RunE entry point: it records the destination
// registry from the arguments, initializes the credential store, and performs
// the copy.
func runCopyPackages(_ *cobra.Command, args []string) error {
	ctx := context.Background()
	copyPackagesCommand.destination = args[0]
	credentialStore := registry.NewCredentialStore()
	if err := credentialStore.Init(); err != nil {
		return err
	}
	return copyPackagesCommand.call(ctx, credentialStore)
}
// call copies first the curated-packages helm charts and then the package
// images from their source registries into c.destination.
// NOTE(review): the value receiver works here only because registryCache is
// assigned before any copy and every later use happens within this call
// chain; the assignment does not persist on copyPackagesCommand.
func (c CopyPackagesCommand) call(ctx context.Context, credentialStore *registry.CredentialStore) error {
	factory := dependencies.NewFactory()
	deps, err := factory.
		WithManifestReader().
		Build(ctx)
	if err != nil {
		return err
	}
	eksaBundle, err := bundles.Read(deps.ManifestReader, c.bundleFile)
	if err != nil {
		return err
	}
	c.registryCache = registry.NewCache()
	bundleReader := curatedpackages.NewPackageReader(c.registryCache, credentialStore, c.awsRegion)
	imageList := bundleReader.ReadChartsFromBundles(ctx, eksaBundle)
	certificates, err := registry.GetCertificates(c.dstCert)
	if err != nil {
		return err
	}
	dstContext := registry.NewStorageContext(c.destination, credentialStore, certificates, c.insecure)
	dstRegistry, err := c.registryCache.Get(dstContext)
	if err != nil {
		return fmt.Errorf("error with repository %s: %v", c.destination, err)
	}
	log.Printf("Copying curated packages helm charts from public ECR to %s", c.destination)
	err = c.copyImages(ctx, dstRegistry, credentialStore, imageList)
	if err != nil {
		return err
	}
	imageList, err = bundleReader.ReadImagesFromBundles(ctx, eksaBundle)
	if err != nil {
		return err
	}
	// Images land under the curated-packages project in the destination.
	dstRegistry.SetProject("curated-packages/")
	log.Printf("Copying curated packages images from private ECR to %s", c.destination)
	return c.copyImages(ctx, dstRegistry, credentialStore, imageList)
}
// copyImages copies each artifact in imageList from its source registry into
// dstRegistry, honoring --src-cert, --insecure and --dry-run.
func (c CopyPackagesCommand) copyImages(ctx context.Context, dstRegistry registry.StorageClient, credentialStore *registry.CredentialStore, imageList []registry.Artifact) error {
	certificates, err := registry.GetCertificates(c.srcCert)
	if err != nil {
		return err
	}
	for _, img := range imageList {
		srcContext := registry.NewStorageContext(img.Registry, credentialStore, certificates, c.insecure)
		srcRegistry, err := c.registryCache.Get(srcContext)
		if err != nil {
			return fmt.Errorf("error with repository %s: %v", img.Registry, err)
		}
		artifact := registry.NewArtifact(img.Registry, img.Repository, img.Tag, img.Digest)
		log.Println(dstRegistry.Destination(artifact))
		if c.dryRun {
			continue
		}
		if err := registry.Copy(ctx, srcRegistry, dstRegistry, artifact); err != nil {
			return err
		}
	}
	return nil
}
| 147 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// createCmd groups the create subcommands (e.g. `create cluster`).
var createCmd = &cobra.Command{
	Use:   "create",
	Short: "Create resources",
	Long:  "Use eksctl anywhere create to create resources, such as clusters",
}

func init() {
	rootCmd.AddCommand(createCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/awsiamauth"
"github.com/aws/eks-anywhere/pkg/clustermanager"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
"github.com/aws/eks-anywhere/pkg/workflow/management"
"github.com/aws/eks-anywhere/pkg/workflows"
)
// createClusterOptions holds the CLI flag values for "create cluster".
type createClusterOptions struct {
	clusterOptions
	timeoutOptions
	forceClean            bool   // force deletion of a previously created bootstrap cluster
	skipIpCheck           bool   // skip the control-plane IP in-use check
	hardwareCSVPath       string // Tinkerbell hardware inventory CSV path
	tinkerbellBootstrapIP string // override for the local Tinkerbell IP in the bootstrap cluster
	installPackages       string // curated packages config to install after create
}

// cc is the package-level flag receiver wired to createClusterCmd's flags.
var cc = &createClusterOptions{}
// createClusterCmd creates a workload cluster from a cluster config file.
var createClusterCmd = &cobra.Command{
	Use:          "cluster -f <cluster-config-file> [flags]",
	Short:        "Create workload cluster",
	Long:         "This command is used to create workload clusters",
	PreRunE:      bindFlagsToViper,
	SilenceUsage: true,
	RunE:         cc.createCluster,
}
// init wires createClusterCmd under "create" and registers its flags; the
// cluster config file flag ("filename") is mandatory.
func init() {
	createCmd.AddCommand(createClusterCmd)
	applyClusterOptionFlags(createClusterCmd.Flags(), &cc.clusterOptions)
	applyTimeoutFlags(createClusterCmd.Flags(), &cc.timeoutOptions)
	applyTinkerbellHardwareFlag(createClusterCmd.Flags(), &cc.hardwareCSVPath)
	createClusterCmd.Flags().StringVar(&cc.tinkerbellBootstrapIP, "tinkerbell-bootstrap-ip", "", "Override the local tinkerbell IP in the bootstrap cluster")
	createClusterCmd.Flags().BoolVar(&cc.forceClean, "force-cleanup", false, "Force deletion of previously created bootstrap cluster")
	createClusterCmd.Flags().BoolVar(&cc.skipIpCheck, "skip-ip-check", false, "Skip check for whether cluster control plane ip is in use")
	createClusterCmd.Flags().StringVar(&cc.installPackages, "install-packages", "", "Location of curated packages configuration files to install to the cluster")
	if err := createClusterCmd.MarkFlagRequired("filename"); err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
}
// createCluster is the RunE handler for "create cluster". It validates the
// cluster config file and local environment (docker version/memory, existing
// kubeconfig collisions), builds the dependency graph, then runs either the
// legacy create workflow or the new management workflow when the
// UseNewWorkflows feature flag is active.
func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) error {
	ctx := cmd.Context()

	// The cluster config file must exist and parse into a valid config.
	clusterConfigFileExist := validations.FileExists(cc.fileName)
	if !clusterConfigFileExist {
		return fmt.Errorf("the cluster config file %s does not exist", cc.fileName)
	}

	clusterConfig, err := v1alpha1.GetAndValidateClusterConfig(cc.fileName)
	if err != nil {
		return fmt.Errorf("the cluster config file provided is invalid: %v", err)
	}

	// Tinkerbell clusters additionally require the hardware CSV flag to be
	// consistent with the operation type.
	if clusterConfig.Spec.DatacenterRef.Kind == v1alpha1.TinkerbellDatacenterKind {
		if err := checkTinkerbellFlags(cmd.Flags(), cc.hardwareCSVPath, Create); err != nil {
			return err
		}
	}

	docker := executables.BuildDockerExecutable()

	if err := validations.CheckMinimumDockerVersion(ctx, docker); err != nil {
		return fmt.Errorf("failed to validate docker: %v", err)
	}

	validations.CheckDockerAllocatedMemory(ctx, docker)

	// Refuse to proceed if a kubeconfig from a previous cluster with the
	// same name already exists on disk.
	kubeconfigPath := kubeconfig.FromClusterName(clusterConfig.Name)
	if validations.FileExistsAndIsNotEmpty(kubeconfigPath) {
		return fmt.Errorf(
			"old cluster config file exists under %s, please use a different clusterName to proceed",
			clusterConfig.Name,
		)
	}

	clusterSpec, err := newClusterSpec(cc.clusterOptions)
	if err != nil {
		return err
	}

	if err := validations.ValidateAuthenticationForRegistryMirror(clusterSpec); err != nil {
		return err
	}

	cliConfig := buildCliConfig(clusterSpec)
	dirs, err := cc.directoriesToMount(clusterSpec, cliConfig, cc.installPackages)
	if err != nil {
		return err
	}

	clusterManagerTimeoutOpts, err := buildClusterManagerOpts(cc.timeoutOptions, clusterSpec.Cluster.Spec.DatacenterRef.Kind)
	if err != nil {
		return fmt.Errorf("failed to build cluster manager opts: %v", err)
	}

	// Assemble every dependency the create workflow needs.
	factory := dependencies.ForSpec(ctx, clusterSpec).WithExecutableMountDirs(dirs...).
		WithBootstrapper().
		WithCliConfig(cliConfig).
		WithClusterManager(clusterSpec.Cluster, clusterManagerTimeoutOpts).
		WithProvider(cc.fileName, clusterSpec.Cluster, cc.skipIpCheck, cc.hardwareCSVPath, cc.forceClean, cc.tinkerbellBootstrapIP).
		WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig).
		WithWriter().
		WithEksdInstaller().
		WithPackageInstaller(clusterSpec, cc.installPackages, cc.managementKubeconfig).
		WithValidatorClients()

	if cc.timeoutOptions.noTimeouts {
		factory.WithNoTimeouts()
	}

	deps, err := factory.Build(ctx)
	if err != nil {
		return err
	}
	// close here is the package-local cleanup helper, not the builtin.
	defer close(ctx, deps)

	createCluster := workflows.NewCreate(
		deps.Bootstrapper,
		deps.Provider,
		deps.ClusterManager,
		deps.GitOpsFlux,
		deps.Writer,
		deps.EksdInstaller,
		deps.PackageInstaller,
	)

	validationOpts := &validations.Opts{
		Kubectl: deps.UnAuthKubectlClient,
		Spec:    clusterSpec,
		WorkloadCluster: &types.Cluster{
			Name:           clusterSpec.Cluster.Name,
			KubeconfigFile: kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
		},
		ManagementCluster: getManagementCluster(clusterSpec),
		Provider:          deps.Provider,
		CliConfig:         cliConfig,
	}
	createValidations := createvalidations.New(validationOpts)

	if features.UseNewWorkflows().IsActive() {
		// New workflow path: rebuild deps with a CNI installer and run the
		// management.CreateCluster workflow with hook registrars.
		deps, err = factory.
			WithCNIInstaller(clusterSpec, deps.Provider).
			Build(ctx)
		if err != nil {
			return err
		}

		wflw := &management.CreateCluster{
			Spec:                          clusterSpec,
			Bootstrapper:                  deps.Bootstrapper,
			CreateBootstrapClusterOptions: deps.Provider,
			CNIInstaller:                  deps.CNIInstaller,
			Cluster:                       clustermanager.NewCreateClusterShim(clusterSpec, deps.ClusterManager, deps.Provider),
			FS:                            deps.Writer,
		}

		wflw.WithHookRegistrar(awsiamauth.NewHookRegistrar(deps.AwsIamAuth, clusterSpec))

		// Not all provider implementations want to bind hooks so we explicitly check if they
		// want to bind hooks before registering it.
		if registrar, ok := deps.Provider.(management.CreateClusterHookRegistrar); ok {
			wflw.WithHookRegistrar(registrar)
		}

		err = wflw.Run(ctx)
	} else {
		err = createCluster.Run(ctx, clusterSpec, createValidations, cc.forceClean)
	}

	cleanup(deps, &err)
	return err
}
| 190 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// createPackageOptions holds the CLI flag values for "create packages".
type createPackageOptions struct {
	fileName string // file containing curated packages custom resources to create
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig      string
	bundlesOverride string // optional override for the default Bundles manifest
}

// cpo is the package-level flag receiver wired to createPackagesCommand's flags.
var cpo = &createPackageOptions{}
// init wires createPackagesCommand under "create" and registers its flags;
// the resource file flag ("filename") is mandatory.
func init() {
	createCmd.AddCommand(createPackagesCommand)
	createPackagesCommand.Flags().StringVarP(&cpo.fileName, "filename", "f",
		"", "Filename that contains curated packages custom resources to create")
	createPackagesCommand.Flags().StringVar(&cpo.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file to use.")
	createPackagesCommand.Flags().StringVar(&cpo.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	err := createPackagesCommand.MarkFlagRequired("filename")
	if err != nil {
		log.Fatalf("Error marking flag as required: %v", err)
	}
}
// createPackagesCommand applies curated-packages custom resources to a
// cluster; callable as "package" or "packages".
var createPackagesCommand = &cobra.Command{
	Use:          "package(s) [flags]",
	Short:        "Create curated packages",
	Long:         "Create Curated Packages Custom Resources to the cluster",
	Aliases:      []string{"package", "packages"},
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return createPackages(cmd.Context())
	},
}
// createPackages applies the curated-packages custom resources from the file
// referenced by cpo.fileName to the cluster selected by cpo.kubeConfig.
func createPackages(ctx context.Context) error {
	kubeConfig, err := kubeconfig.ResolveAndValidateFilename(cpo.kubeConfig, "")
	if err != nil {
		return err
	}

	deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithBundlesOverride(cpo.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}

	packages := curatedpackages.NewPackageClient(deps.Kubectl)

	// The license notice is printed before any cluster mutation happens.
	curatedpackages.PrintLicense()
	return packages.CreatePackages(ctx, cpo.fileName, kubeConfig)
}
| 76 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// deleteCmd is the parent "delete" verb; subcommands attach themselves in
// their own files' init.
var deleteCmd = &cobra.Command{
	Use:   "delete",
	Short: "Delete resources",
	Long:  "Use eksctl anywhere delete to delete clusters",
}

func init() {
	rootCmd.AddCommand(deleteCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/workflows"
)
// deleteClusterOptions holds the CLI flag values for "delete cluster".
type deleteClusterOptions struct {
	clusterOptions
	wConfig               string // kubeconfig to use when deleting a workload cluster
	forceCleanup          bool   // force deletion of a previously created bootstrap cluster
	hardwareFileName      string // Tinkerbell hardware inventory CSV path
	tinkerbellBootstrapIP string // override for the local Tinkerbell IP in the bootstrap cluster
}

// dc is the package-level flag receiver wired to deleteClusterCmd's flags.
var dc = &deleteClusterOptions{}
// deleteClusterCmd deletes a workload cluster identified either by name or by
// a cluster config file.
var deleteClusterCmd = &cobra.Command{
	Use:          "cluster (<cluster-name>|-f <config-file>)",
	Short:        "Workload cluster",
	Long:         "This command is used to delete workload clusters created by eksctl anywhere",
	PreRunE:      bindFlagsToViper,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := dc.validate(cmd.Context(), args); err != nil {
			return err
		}
		if err := dc.deleteCluster(cmd.Context()); err != nil {
			return fmt.Errorf("failed to delete cluster: %v", err)
		}
		return nil
	},
}
// init wires deleteClusterCmd under "delete" and registers its flags.
func init() {
	deleteCmd.AddCommand(deleteClusterCmd)
	deleteClusterCmd.Flags().StringVarP(&dc.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration, required if <cluster-name> is not provided")
	deleteClusterCmd.Flags().StringVarP(&dc.wConfig, "w-config", "w", "", "Kubeconfig file to use when deleting a workload cluster")
	deleteClusterCmd.Flags().BoolVar(&dc.forceCleanup, "force-cleanup", false, "Force deletion of previously created bootstrap cluster")
	deleteClusterCmd.Flags().StringVar(&dc.managementKubeconfig, "kubeconfig", "", "kubeconfig file pointing to a management cluster")
	deleteClusterCmd.Flags().StringVar(&dc.bundlesOverride, "bundles-override", "", "Override default Bundles manifest (not recommended)")
}
// validate resolves the cluster config file (from -f or from the positional
// cluster name's conventional path), validates it, and checks the kubeconfig
// that will be used for the delete.
func (dc *deleteClusterOptions) validate(ctx context.Context, args []string) error {
	if dc.fileName == "" {
		// No -f given: derive the conventional config path from <cluster-name>.
		name, err := validations.ValidateClusterNameArg(args)
		if err != nil {
			return fmt.Errorf("please provide either a valid <cluster-name> or -f <config-file>")
		}
		derived := fmt.Sprintf("%[1]s/%[1]s-eks-a-cluster.yaml", name)
		if !validations.FileExists(derived) {
			return fmt.Errorf("clusterconfig file %s for cluster: %s not found, please provide the clusterconfig path manually using -f <config-file>", derived, name)
		}
		dc.fileName = derived
	}

	clusterConfig, err := commonValidation(ctx, dc.fileName)
	if err != nil {
		return err
	}

	kubeconfigPath := getKubeconfigPath(clusterConfig.Name, dc.wConfig)
	return kubeconfig.ValidateFilename(kubeconfigPath)
}
// deleteCluster tears down the workload cluster described by dc. It builds
// the cluster spec and dependency graph, then runs the delete workflow using
// either the cluster's own kubeconfig or its management cluster's kubeconfig.
func (dc *deleteClusterOptions) deleteCluster(ctx context.Context) error {
	clusterSpec, err := newClusterSpec(dc.clusterOptions)
	if err != nil {
		return fmt.Errorf("unable to get cluster config from file: %v", err)
	}

	if err := validations.ValidateAuthenticationForRegistryMirror(clusterSpec); err != nil {
		return err
	}

	cliConfig := buildCliConfig(clusterSpec)
	dirs, err := dc.directoriesToMount(clusterSpec, cliConfig)
	if err != nil {
		return err
	}

	// NOTE(review): cc.skipIpCheck below reads the *create* command's flag
	// receiver; during a delete invocation it is the zero value (false).
	// Presumably this was meant to be a deleteClusterOptions field — confirm.
	deps, err := dependencies.ForSpec(ctx, clusterSpec).WithExecutableMountDirs(dirs...).
		WithBootstrapper().
		WithCliConfig(cliConfig).
		WithClusterManager(clusterSpec.Cluster, nil).
		WithProvider(dc.fileName, clusterSpec.Cluster, cc.skipIpCheck, dc.hardwareFileName, false, dc.tinkerbellBootstrapIP).
		WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig).
		WithWriter().
		Build(ctx)
	if err != nil {
		return err
	}
	// close here is the package-local cleanup helper, not the builtin.
	defer close(ctx, deps)

	deleteCluster := workflows.NewDelete(
		deps.Bootstrapper,
		deps.Provider,
		deps.ClusterManager,
		deps.GitOpsFlux,
		deps.Writer,
	)

	// Standalone clusters use their own kubeconfig; managed clusters are
	// deleted through their management cluster's kubeconfig.
	var cluster *types.Cluster
	if clusterSpec.ManagementCluster == nil {
		cluster = &types.Cluster{
			Name:               clusterSpec.Cluster.Name,
			KubeconfigFile:     kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
			ExistingManagement: false,
		}
	} else {
		cluster = &types.Cluster{
			Name:               clusterSpec.Cluster.Name,
			KubeconfigFile:     clusterSpec.ManagementCluster.KubeconfigFile,
			ExistingManagement: true,
		}
	}
	err = deleteCluster.Run(ctx, cluster, clusterSpec, dc.forceCleanup, dc.managementKubeconfig)
	cleanup(deps, &err)
	return err
}
| 133 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// deletePackageOptions holds the CLI flag values for "delete packages".
type deletePackageOptions struct {
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig      string
	clusterName     string // cluster whose package resources are deleted
	bundlesOverride string // optional override for the default Bundles manifest
}

// delPkgOpts is the package-level flag receiver wired to deletePackageCommand's flags.
var delPkgOpts = deletePackageOptions{}
// init wires deletePackageCommand under "delete" and registers its flags; the
// "cluster" flag is mandatory.
func init() {
	deleteCmd.AddCommand(deletePackageCommand)
	deletePackageCommand.Flags().StringVar(&delPkgOpts.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file to use.")
	deletePackageCommand.Flags().StringVar(&delPkgOpts.clusterName, "cluster", "",
		"Cluster for package deletion.")
	deletePackageCommand.Flags().StringVar(&delPkgOpts.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	if err := deletePackageCommand.MarkFlagRequired("cluster"); err != nil {
		log.Fatalf("marking cluster flag as required: %s", err)
	}
}
// deletePackageCommand deletes one or more curated-package custom resources
// (named as positional args) from a cluster.
var deletePackageCommand = &cobra.Command{
	Use:          "package(s) [flags]",
	Aliases:      []string{"package", "packages"},
	Short:        "Delete package(s)",
	Long:         "This command is used to delete the curated packages custom resources installed in the cluster",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return deleteResources(cmd.Context(), args)
	},
	// At least one package name must be supplied.
	Args: cobra.MinimumNArgs(1),
}
// deleteResources removes the curated packages named in args from the cluster
// selected by delPkgOpts.clusterName, using an optional kubeconfig override.
func deleteResources(ctx context.Context, args []string) error {
	kubeConfig, err := kubeconfig.ResolveAndValidateFilename(delPkgOpts.kubeConfig, "")
	if err != nil {
		return err
	}

	deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithBundlesOverride(delPkgOpts.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}

	packages := curatedpackages.NewPackageClient(deps.Kubectl)
	return packages.DeletePackages(ctx, args, kubeConfig, delPkgOpts.clusterName)
}
| 71 |
eks-anywhere | aws | Go | package cmd
//////////////////////////////////////////////////////
//
// WARNING: The command defined in this file is DEPRECATED.
//
// See ./import_images.go for the newer command.
//
//////////////////////////////////////////////////////
import (
"context"
"fmt"
"log"
"net"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/utils/urls"
"github.com/aws/eks-anywhere/pkg/version"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// importImagesOptions holds the CLI flag values for the deprecated
// "import-images" command.
type importImagesOptions struct {
	fileName string // EKS-A cluster configuration file
}

// opts is the package-level flag receiver wired to importImagesCmdDeprecated.
var opts = &importImagesOptions{}

// ociPrefix is prepended to chart references to form helm OCI URIs.
const ociPrefix = "oci://"
// init registers the deprecated import-images command directly on the root
// command; the "filename" flag is mandatory.
func init() {
	rootCmd.AddCommand(importImagesCmdDeprecated)
	importImagesCmdDeprecated.Flags().StringVarP(&opts.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
	err := importImagesCmdDeprecated.MarkFlagRequired("filename")
	if err != nil {
		log.Fatalf("Error marking filename flag as required: %v", err)
	}
}
// importImagesCmdDeprecated pushes bundle images to a private registry.
// Deprecated in favor of "eksctl anywhere import images".
var importImagesCmdDeprecated = &cobra.Command{
	Use:          "import-images",
	Short:        "Push EKS Anywhere images to a private registry (Deprecated)",
	Long:         "This command is used to import images from an EKS Anywhere release bundle into a private registry",
	PreRunE:      preRunImportImagesCmd,
	SilenceUsage: true,
	Deprecated:   "use `eksctl anywhere import images` instead",
	RunE: func(cmd *cobra.Command, args []string) error {
		return importImages(cmd.Context(), opts.fileName)
	},
}
// importImages pulls every image and helm chart referenced by the cluster
// spec's versions bundle and pushes them to the registry mirror configured in
// the spec. Registry credentials are read from the REGISTRY_USERNAME and
// REGISTRY_PASSWORD environment variables.
//
//gocyclo:ignore
func importImages(ctx context.Context, clusterSpecPath string) error {
	registryUsername := os.Getenv("REGISTRY_USERNAME")
	registryPassword := os.Getenv("REGISTRY_PASSWORD")
	if registryUsername == "" || registryPassword == "" {
		return fmt.Errorf("username or password not set. Provide REGISTRY_USERNAME and REGISTRY_PASSWORD for importing helm charts (e.g. cilium)")
	}

	clusterSpec, err := readAndValidateClusterSpec(clusterSpecPath, version.Get())
	if err != nil {
		return err
	}

	de := executables.BuildDockerExecutable()

	bundle := clusterSpec.VersionsBundle
	executableBuilder, closer, err := executables.InitInDockerExecutablesBuilder(ctx, bundle.Eksa.CliTools.VersionedImage())
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}
	defer closer.CheckErr(ctx)
	helmExecutable := executableBuilder.BuildHelmExecutable(executables.WithInsecure())

	if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration == nil || clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Endpoint == "" {
		return fmt.Errorf("endpoint not set. It is necessary to define a valid endpoint in your spec (registryMirrorConfiguration.endpoint)")
	}

	host := clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Endpoint
	port := clusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Port
	if port == "" {
		logger.V(1).Info("RegistryMirrorConfiguration.Port is not specified, default port will be used", "Default Port", constants.DefaultHttpsPort)
		port = constants.DefaultHttpsPort
	}
	// Fix: validate the effective port (after defaulting). The previous code
	// validated the raw spec value, which rejected an empty port even though
	// the default had just been applied.
	if !networkutils.IsPortValid(port) {
		return fmt.Errorf("registry mirror port %s is invalid, please provide a valid port", port)
	}

	images, err := getImages(clusterSpecPath, "")
	if err != nil {
		return err
	}

	for _, image := range images {
		if err := importImage(ctx, de, image.URI, net.JoinHostPort(host, port)); err != nil {
			return fmt.Errorf("importing image %s: %v", image.URI, err)
		}
	}

	endpoint := registrymirror.FromCluster(clusterSpec.Cluster).BaseRegistry
	return importCharts(ctx, helmExecutable, bundle.Charts(), endpoint, registryUsername, registryPassword)
}
// importImage pulls an image locally, retags it for the mirror endpoint, then
// pushes the retagged image to the mirror.
func importImage(ctx context.Context, docker *executables.Docker, image string, endpoint string) error {
	err := docker.PullImage(ctx, image)
	if err != nil {
		return err
	}

	err = docker.TagImage(ctx, image, endpoint)
	if err != nil {
		return err
	}

	return docker.PushImage(ctx, image, endpoint)
}
// importCharts logs in to the target registry once, then imports each chart
// in the bundle's chart map, stopping at the first failure.
func importCharts(ctx context.Context, helm *executables.Helm, charts map[string]*v1alpha1.Image, endpoint, username, password string) error {
	if err := helm.RegistryLogin(ctx, endpoint, username, password); err != nil {
		return err
	}

	for _, chart := range charts {
		err := importChart(ctx, helm, *chart, endpoint)
		if err != nil {
			return err
		}
	}
	return nil
}
// importChart pulls a single chart from its source URI and pushes it to the
// destination registry endpoint.
func importChart(ctx context.Context, helm *executables.Helm, chart v1alpha1.Image, endpoint string) error {
	uri, chartVersion := getChartUriAndVersion(chart)
	err := helm.PullChart(ctx, uri, chartVersion)
	if err != nil {
		return err
	}
	return helm.PushChart(ctx, chart.ChartName(), pushChartURI(chart, endpoint))
}
// preRunImportImagesCmd binds every flag of the command to viper so values
// can also come from config files or environment variables. Binding errors
// are returned to the caller (cobra surfaces PreRunE errors) instead of
// killing the process with log.Fatalf, matching bindFlagsToViper.
func preRunImportImagesCmd(cmd *cobra.Command, args []string) error {
	var err error
	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
		if err != nil {
			return
		}
		err = viper.BindPFlag(flag.Name, flag)
	})
	return err
}
// getChartUriAndVersion returns the OCI URI and tag for a bundle chart.
func getChartUriAndVersion(chart v1alpha1.Image) (uri, version string) {
	return ociPrefix + chart.Image(), chart.Tag()
}
// pushChartURI builds the OCI push destination for a chart by rehosting its
// source repository path onto the given registry endpoint.
func pushChartURI(chart v1alpha1.Image, registryEndpoint string) string {
	sourceRepo := ociPrefix + filepath.Dir(chart.Image())
	return urls.ReplaceHost(sourceRepo, registryEndpoint)
}
| 166 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// describeCmd is the parent "describe" verb; subcommands attach themselves in
// their own files' init.
var describeCmd = &cobra.Command{
	Use:   "describe",
	Short: "Describe resources",
	Long:  "Use eksctl anywhere describe to show details of a specific resource or group of resources",
}

func init() {
	rootCmd.AddCommand(describeCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// describePackagesOption holds the CLI flag values for "describe packages".
type describePackagesOption struct {
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig      string
	clusterName     string // cluster whose packages are described
	bundlesOverride string // optional override for the default Bundles manifest
}

// dpo is the package-level flag receiver wired to describePackagesCommand's flags.
var dpo = &describePackagesOption{}
// init wires describePackagesCommand under "describe" and registers its
// flags; the "cluster" flag is mandatory.
func init() {
	describeCmd.AddCommand(describePackagesCommand)
	describePackagesCommand.Flags().StringVar(&dpo.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file to use.")
	describePackagesCommand.Flags().StringVar(&dpo.clusterName, "cluster", "",
		"Cluster to describe packages.")
	describePackagesCommand.Flags().StringVar(&dpo.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	if err := describePackagesCommand.MarkFlagRequired("cluster"); err != nil {
		log.Fatalf("marking cluster flag as required: %s", err)
	}
}
// describePackagesCommand shows details of curated packages installed in a
// cluster; callable as "package" or "packages".
var describePackagesCommand = &cobra.Command{
	Use:          "package(s) [flags]",
	Short:        "Describe curated packages in the cluster",
	Aliases:      []string{"package", "packages"},
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return describeResources(cmd.Context(), args)
	},
}
// describeResources prints details for the curated packages named in args in
// the cluster selected by dpo.clusterName, using an optional kubeconfig
// override.
func describeResources(ctx context.Context, args []string) error {
	kubeConfig, err := kubeconfig.ResolveAndValidateFilename(dpo.kubeConfig, "")
	if err != nil {
		return err
	}

	deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithBundlesOverride(dpo.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}

	packages := curatedpackages.NewPackageClient(deps.Kubectl)
	return packages.DescribePackages(ctx, args, kubeConfig, dpo.clusterName)
}
| 72 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// downloadCmd is the parent "download" verb; subcommands (artifacts, images)
// attach themselves in their own files' init.
var downloadCmd = &cobra.Command{
	Use:   "download",
	Short: "Download resources",
	Long:  "Use eksctl anywhere download to download artifacts (manifests, bundles) used by EKS Anywhere",
}

func init() {
	rootCmd.AddCommand(downloadCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"fmt"
"io"
"log"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/manifests/releases"
"github.com/aws/eks-anywhere/pkg/version"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// downloadArtifactsOptions holds the CLI flag values for "download artifacts".
type downloadArtifactsOptions struct {
	downloadDir     string // destination directory for downloaded artifacts
	fileName        string // deprecated: EKS-A cluster configuration file
	bundlesOverride string // optional override for the default Bundles manifest
	dryRun          bool   // print manifest URIs instead of downloading
	retainDir       bool   // keep the download folder after creating the tarball
}

// downloadArtifactsopts is the package-level flag receiver wired to downloadArtifactsCmd.
var downloadArtifactsopts = &downloadArtifactsOptions{}
// init wires downloadArtifactsCmd under "download" and registers its flags.
func init() {
	downloadCmd.AddCommand(downloadArtifactsCmd)
	downloadArtifactsCmd.Flags().StringVarP(&downloadArtifactsopts.bundlesOverride, "bundles-override", "", "", "Override default Bundles manifest (not recommended)")
	downloadArtifactsCmd.Flags().StringVarP(&downloadArtifactsopts.fileName, "filename", "f", "", "[Deprecated] Filename that contains EKS-A cluster configuration")
	downloadArtifactsCmd.Flags().StringVarP(&downloadArtifactsopts.downloadDir, "download-dir", "d", "eks-anywhere-downloads", "Directory to download the artifacts to")
	downloadArtifactsCmd.Flags().BoolVarP(&downloadArtifactsopts.dryRun, "dry-run", "", false, "Print the manifest URIs without downloading them")
	downloadArtifactsCmd.Flags().BoolVarP(&downloadArtifactsopts.retainDir, "retain-dir", "r", false, "Do not delete the download folder after creating a tarball")
}
// downloadArtifactsCmd downloads the S3 artifacts from an EKS Anywhere bundle
// manifest and packages them into a tarball.
var downloadArtifactsCmd = &cobra.Command{
	Use:          "artifacts",
	Short:        "Download EKS Anywhere artifacts/manifests to a tarball on disk",
	Long:         "This command is used to download the S3 artifacts from an EKS Anywhere bundle manifest and package them into a tarball",
	PreRunE:      preRunDownloadArtifactsCmd,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return downloadArtifacts(cmd.Context(), downloadArtifactsopts)
	},
}
// downloadArtifacts resolves the Bundles manifest (from the override file or
// the release for the current CLI version), downloads every referenced
// manifest into opts.downloadDir, rewrites the bundle to point at the local
// copies, and finally packages everything as a tarball. With dryRun set it
// only prints the artifact URIs.
//
// NOTE: the parameter is named "context", shadowing the context package
// within this function; only the context.Context value is needed here.
func downloadArtifacts(context context.Context, opts *downloadArtifactsOptions) error {
	factory := dependencies.NewFactory()
	deps, err := factory.
		WithFileReader().
		WithManifestReader().
		Build(context)
	if err != nil {
		return err
	}
	reader := deps.FileReader

	// Pick the bundle source: explicit override file, or the released bundle
	// matching this CLI's version.
	var b *releasev1.Bundles
	if opts.bundlesOverride != "" {
		b, err = bundles.Read(reader, opts.bundlesOverride)
		if err != nil {
			return err
		}
	} else {
		b, err = deps.ManifestReader.ReadBundlesForVersion(version.Get().GitVersion)
		if err != nil {
			return err
		}
	}

	// download the eks-a-release.yaml
	if !opts.dryRun {
		releaseManifestURL := releases.ManifestURL()
		if err := downloadArtifact(filepath.Join(opts.downloadDir, filepath.Base(releaseManifestURL)), releaseManifestURL, reader); err != nil {
			return fmt.Errorf("downloading release manifest: %v", err)
		}
	}

	versionBundles := b.Spec.VersionsBundles
	for i, bundle := range versionBundles {
		for component, manifestList := range bundle.Manifests() {
			for _, manifest := range manifestList {
				if *manifest == "" {
					// This can happen if the provider is not GA and not added to the bundle-release corresponding to an EKS-A release
					continue
				}
				if opts.dryRun {
					logger.Info(fmt.Sprintf("Found artifact: %s\n", *manifest))
					continue
				}
				filePath := filepath.Join(opts.downloadDir, bundle.KubeVersion, component, filepath.Base(*manifest))
				if err = downloadArtifact(filePath, *manifest, reader); err != nil {
					return fmt.Errorf("downloading artifact for component %s: %v", component, err)
				}
				// Point the bundle entry at the downloaded local file.
				*manifest = filePath
			}
		}
		b.Spec.VersionsBundles[i] = bundle
	}

	// Write the rewritten bundle next to the downloaded artifacts.
	bundleReleaseContent, err := yaml.Marshal(b)
	if err != nil {
		return fmt.Errorf("marshaling bundle-release.yaml: %v", err)
	}
	bundleReleaseFilePath := filepath.Join(opts.downloadDir, "bundle-release.yaml")
	if err = os.WriteFile(bundleReleaseFilePath, bundleReleaseContent, 0o644); err != nil {
		return err
	}

	if !opts.dryRun {
		if err = createTarball(opts.downloadDir); err != nil {
			return err
		}
		if !opts.retainDir {
			if err = os.RemoveAll(opts.downloadDir); err != nil {
				return err
			}
		}
	}
	return nil
}
// preRunDownloadArtifactsCmd binds every flag of the command to viper so
// values can also come from config files or environment variables. Binding
// errors are returned to the caller (cobra surfaces PreRunE errors) instead
// of killing the process with log.Fatalf, matching bindFlagsToViper.
func preRunDownloadArtifactsCmd(cmd *cobra.Command, args []string) error {
	var err error
	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
		if err != nil {
			return
		}
		err = viper.BindPFlag(flag.Name, flag)
	})
	return err
}
// downloadArtifact fetches artifactUri via reader and writes the contents to
// filePath, creating any missing parent directories first.
func downloadArtifact(filePath, artifactUri string, reader *files.Reader) error {
	logger.V(3).Info(fmt.Sprintf("Downloading artifact: %s", artifactUri))
	if err := os.MkdirAll(filepath.Dir(filePath), 0o755); err != nil {
		return err
	}

	logger.V(3).Info(fmt.Sprintf("Creating local artifact file: %s", filePath))
	// The whole artifact is buffered in memory before writing.
	contents, err := reader.ReadFile(artifactUri)
	if err != nil {
		return err
	}
	if err = os.WriteFile(filePath, contents, 0o644); err != nil {
		return err
	}

	logger.V(3).Info(fmt.Sprintf("Successfully downloaded artifact %s to %s", artifactUri, filePath))
	return nil
}
func createTarball(downloadDir string) error {
var buf bytes.Buffer
tarFileName := fmt.Sprintf("%s.tar.gz", downloadDir)
tarFile, err := os.Create(tarFileName)
if err != nil {
return err
}
defer tarFile.Close()
gzipWriter := gzip.NewWriter(&buf)
defer gzipWriter.Close()
tarWriter := tar.NewWriter(gzipWriter)
defer tarWriter.Close()
err = filepath.Walk(downloadDir, func(file string, fileInfo os.FileInfo, walkErr error) error {
header, err := tar.FileInfoHeader(fileInfo, file)
if err != nil {
return err
}
header.Name = filepath.ToSlash(file)
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
if !fileInfo.IsDir() {
data, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tarWriter, data); err != nil {
return err
}
logger.V(3).Info(fmt.Sprintf("Added file %s to tarball", file))
}
return nil
})
if err != nil {
return err
}
tarWriter.Close()
gzipWriter.Close()
if _, err = io.Copy(tarFile, &buf); err != nil {
return err
}
logger.V(3).Info(fmt.Sprintf("Successfully created downloads tarball %s", tarFileName))
return nil
}
| 223 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"log"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
"github.com/aws/eks-anywhere/pkg/curatedpackages/oras"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/docker"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/helm"
"github.com/aws/eks-anywhere/pkg/tar"
"github.com/aws/eks-anywhere/pkg/version"
)
// imagesCmd represents the images command.
var downloadImagesCmd = &cobra.Command{
Use: "images",
Short: "Download all eks-a images to disk",
Long: `Creates a tarball containing all necessary images
to create an eks-a cluster for any of the supported
Kubernetes versions.`,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
return downloadImagesRunner.Run(ctx)
},
}
// init wires downloadImagesCmd under "download" and registers its flags; the
// "output" flag is mandatory and "include-packages" is kept only as a
// deprecated no-op.
func init() {
	downloadCmd.AddCommand(downloadImagesCmd)

	downloadImagesCmd.Flags().StringVarP(&downloadImagesRunner.outputFile, "output", "o", "", "Output tarball containing all downloaded images")
	if err := downloadImagesCmd.MarkFlagRequired("output"); err != nil {
		log.Fatalf("Cannot mark 'output' flag as required: %s", err)
	}

	downloadImagesCmd.Flags().BoolVar(&downloadImagesRunner.includePackages, "include-packages", false, "this flag no longer works, use copy packages instead")
	downloadImagesCmd.Flag("include-packages").Deprecated = "use copy packages command"
	downloadImagesCmd.Flags().StringVarP(&downloadImagesRunner.bundlesOverride, "bundles-override", "", "", "Override default Bundles manifest (not recommended)")
	downloadImagesCmd.Flags().BoolVar(&downloadImagesRunner.insecure, "insecure", false, "Flag to indicate skipping TLS verification while downloading helm charts")
}
// downloadImagesRunner is the package-level flag receiver for downloadImagesCmd.
var downloadImagesRunner = downloadImagesCommand{}

// downloadImagesCommand holds the CLI flag values for "download images".
type downloadImagesCommand struct {
	outputFile      string // destination tarball path
	bundlesOverride string // optional override for the default Bundles manifest
	includePackages bool   // deprecated; no longer has any effect
	insecure        bool   // skip TLS verification when downloading helm charts
}
// Run builds the dependencies for the image download, stages bundle images,
// the eks-a tools image, helm charts, and manifests into a temporary folder,
// and packages the result into c.outputFile via the artifacts.Download flow.
func (c downloadImagesCommand) Run(ctx context.Context) error {
	factory := dependencies.NewFactory()

	helmOpts := []executables.HelmOpt{}
	if c.insecure {
		// Propagate --insecure to helm chart downloads.
		helmOpts = append(helmOpts, executables.WithInsecure())
	}

	deps, err := factory.
		WithFileReader().
		WithManifestReader().
		WithHelm(helmOpts...).
		WithLogger().
		Build(ctx)
	if err != nil {
		return err
	}
	defer deps.Close(ctx)

	dockerClient := executables.BuildDockerExecutable()
	downloadFolder := "tmp-eks-a-artifacts-download"
	imagesFile := filepath.Join(downloadFolder, imagesTarFile)
	eksaToolsImageFile := filepath.Join(downloadFolder, eksaToolsImageTarFile)

	// Movers copy images from their original registries to tar files on disk.
	downloadArtifacts := artifacts.Download{
		Reader:     deps.ManifestReader,
		FileReader: deps.FileReader,
		BundlesImagesDownloader: docker.NewImageMover(
			docker.NewOriginalRegistrySource(dockerClient),
			docker.NewDiskDestination(dockerClient, imagesFile),
		),
		EksaToolsImageDownloader: docker.NewImageMover(
			docker.NewOriginalRegistrySource(dockerClient),
			docker.NewDiskDestination(dockerClient, eksaToolsImageFile),
		),
		ChartDownloader:    helm.NewChartRegistryDownloader(deps.Helm, downloadFolder),
		Version:            version.Get(),
		TmpDowloadFolder:   downloadFolder,
		DstFile:            c.outputFile,
		Packager:           packagerForFile(c.outputFile),
		ManifestDownloader: oras.NewBundleDownloader(deps.Logger, downloadFolder),
		BundlesOverride:    c.bundlesOverride,
	}

	return downloadArtifacts.Run(ctx)
}
// packager abstracts archive creation/extraction so the output format can be
// chosen by file extension (see packagerForFile).
type packager interface {
	UnPackage(orgFile, dstFolder string) error
	Package(sourceFolder, dstFile string) error
}
// packagerForFile picks the archive implementation from the output file's
// extension: gzip-compressed for ".tar.gz", plain tar otherwise.
func packagerForFile(file string) packager {
	if strings.HasSuffix(file, ".tar.gz") {
		return tar.NewGzipPackager()
	}
	return tar.NewPackager()
}
| 116 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// expCmd is the parent verb for experimental commands.
var expCmd = &cobra.Command{
	Use:   "exp",
	Short: "experimental commands",
	Long:  "Use eksctl anywhere experimental commands",
}

func init() {
	rootCmd.AddCommand(expCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/validations"
)
const (
	// TinkerbellHardwareCSVFlagName is the long name of the hardware CSV flag.
	TinkerbellHardwareCSVFlagName = "hardware-csv"
	// TinkerbellHardwareCSVFlagAlias is its single-letter shorthand.
	TinkerbellHardwareCSVFlagAlias = "z"
	// TinkerbellHardwareCSVFlagDescription is the flag's help text.
	TinkerbellHardwareCSVFlagDescription = "Path to a CSV file containing hardware data."
	// KubeconfigFile is the name of the kubeconfig flag shared by cluster commands.
	KubeconfigFile = "kubeconfig"
)
// bindFlagsToViper registers every flag of cmd with viper so values can also
// be supplied through config files or environment variables. The first
// binding error, if any, is returned and later flags are skipped.
func bindFlagsToViper(cmd *cobra.Command, args []string) error {
	var bindErr error
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		if bindErr == nil {
			bindErr = viper.BindPFlag(f.Name, f)
		}
	})
	return bindErr
}
// applyClusterOptionFlags registers the flags shared by cluster commands
// (config file, bundles override, management kubeconfig) into flagSet,
// binding them to clusterOpt.
func applyClusterOptionFlags(flagSet *pflag.FlagSet, clusterOpt *clusterOptions) {
	flagSet.StringVarP(&clusterOpt.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
	flagSet.StringVar(&clusterOpt.bundlesOverride, "bundles-override", "", "Override default Bundles manifest (not recommended)")
	flagSet.StringVar(&clusterOpt.managementKubeconfig, "kubeconfig", "", "Management cluster kubeconfig file")
}
// applyTinkerbellHardwareFlag registers the Tinkerbell hardware CSV flag on
// flagSet, writing its value into pathOut.
func applyTinkerbellHardwareFlag(flagSet *pflag.FlagSet, pathOut *string) {
	flagSet.StringVarP(
		pathOut,
		TinkerbellHardwareCSVFlagName,
		TinkerbellHardwareCSVFlagAlias,
		"",
		TinkerbellHardwareCSVFlagDescription,
	)
}
// checkTinkerbellFlags validates the hardware CSV flag for Tinkerbell
// operations. For cluster creation without a kubeconfig the flag is
// mandatory; for upgrades and workload-cluster creation it is optional.
// When a path is provided, the file must exist.
func checkTinkerbellFlags(flagSet *pflag.FlagSet, hardwareCSVPath string, operationType Operation) error {
	// f renamed from "flag" to avoid shadowing the stdlib package name.
	f := flagSet.Lookup(TinkerbellHardwareCSVFlagName)

	// If no flag was returned there is a developer error as the flag has been removed
	// from the program rendering it invalid.
	if f == nil {
		// Fix: the previous message referenced a stale flag name ('hardwarefile').
		panic(fmt.Sprintf("%q flag not configured", TinkerbellHardwareCSVFlagName))
	}

	if !viper.IsSet(TinkerbellHardwareCSVFlagName) || viper.GetString(TinkerbellHardwareCSVFlagName) == "" {
		if operationType == Create && !viper.IsSet(KubeconfigFile) { // For upgrade and workload cluster create, hardware-csv is an optional flag
			return fmt.Errorf("required flag \"%v\" not set", TinkerbellHardwareCSVFlagName)
		}
		return nil
	}

	if !validations.FileExists(hardwareCSVPath) {
		return fmt.Errorf("hardware config file %s does not exist", hardwareCSVPath)
	}

	return nil
}
| 69 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// generateCmd groups resource-generation subcommands under
// "eksctl anywhere generate".
var generateCmd = &cobra.Command{
	Use:   "generate",
	Short: "Generate resources",
	Long:  "Use eksctl anywhere generate to generate resources, such as clusterconfig yaml",
}
// init registers the generate command group on the root command.
func init() {
	rootCmd.AddCommand(generateCmd)
}
| 16 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/diagnostics"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/version"
)
// generateSupportBundleOptions holds flag values for the
// support-bundle-config generation command.
type generateSupportBundleOptions struct {
	// fileName is the optional EKS-A cluster configuration file path.
	fileName string
	// hardwareFileName is the Tinkerbell hardware file path passed to the provider.
	hardwareFileName string
	// tinkerbellBootstrapIP is the Tinkerbell bootstrap IP passed to the provider.
	tinkerbellBootstrapIP string
}
// gsbo holds the parsed flag values for the support-bundle-config command.
var gsbo = &generateSupportBundleOptions{}

// generateBundleConfigCmd generates a default support bundle config yaml,
// optionally tailored to a cluster configuration given via --filename.
var generateBundleConfigCmd = &cobra.Command{
	Use:     "support-bundle-config",
	Short:   "Generate support bundle config",
	Long:    "This command is used to generate a default support bundle config yaml",
	PreRunE: preRunGenerateBundleConfigCmd,
	RunE: func(cmd *cobra.Command, args []string) error {
		err := gsbo.validateCmdInput()
		if err != nil {
			return fmt.Errorf("command input validation failed: %v", err)
		}
		bundle, err := gsbo.generateBundleConfig(cmd.Context())
		if err != nil {
			// Fix: previous message misspelled "bundle" as "bunlde".
			return fmt.Errorf("failed to generate bundle config: %v", err)
		}
		err = bundle.PrintBundleConfig()
		if err != nil {
			return fmt.Errorf("failed to print bundle config: %v", err)
		}
		return nil
	},
}
// init attaches the support-bundle-config command and its --filename flag.
func init() {
	generateCmd.AddCommand(generateBundleConfigCmd)
	generateBundleConfigCmd.Flags().StringVarP(&gsbo.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
}
// preRunGenerateBundleConfigCmd binds every command flag to viper before the
// command runs. A bind failure terminates the process via log.Fatalf.
func preRunGenerateBundleConfigCmd(cmd *cobra.Command, args []string) error {
	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
		err := viper.BindPFlag(flag.Name, flag)
		if err != nil {
			// Exits immediately; remaining flags are not bound.
			log.Fatalf("Error initializing flags: %v", err)
		}
	})
	return nil
}
// validateCmdInput verifies that, when a cluster config file was supplied,
// it exists on disk and parses as a valid EKS-A cluster configuration. An
// empty filename is valid: there is simply nothing to check.
func (gsbo *generateSupportBundleOptions) validateCmdInput() error {
	fileName := gsbo.fileName
	if fileName == "" {
		return nil
	}
	if !validations.FileExists(fileName) {
		return fmt.Errorf("the cluster config file %s does not exist", fileName)
	}
	if _, err := v1alpha1.GetAndValidateClusterConfig(fileName); err != nil {
		return fmt.Errorf("unable to get cluster config from file: %v", err)
	}
	return nil
}
// generateBundleConfig builds a support bundle definition. Without a cluster
// config file it falls back to the default bundle; otherwise it reads and
// validates the cluster spec and builds a workload-cluster bundle from it.
func (gsbo *generateSupportBundleOptions) generateBundleConfig(ctx context.Context) (diagnostics.DiagnosticBundle, error) {
	clusterConfigPath := gsbo.fileName
	if clusterConfigPath == "" {
		return gsbo.generateDefaultBundleConfig(ctx)
	}

	clusterSpec, err := readAndValidateClusterSpec(clusterConfigPath, version.Get())
	if err != nil {
		return nil, fmt.Errorf("unable to get cluster config from file: %v", err)
	}

	// NOTE(review): cc.skipIpCheck is read from create-cluster options declared
	// elsewhere in this package — confirm it is the intended source here.
	deps, err := dependencies.ForSpec(ctx, clusterSpec).
		WithProvider(clusterConfigPath, clusterSpec.Cluster, cc.skipIpCheck, gsbo.hardwareFileName, false, gsbo.tinkerbellBootstrapIP).
		WithDiagnosticBundleFactory().
		Build(ctx)
	if err != nil {
		return nil, err
	}
	defer close(ctx, deps)

	return deps.DignosticCollectorFactory.DiagnosticBundleWorkloadCluster(clusterSpec, deps.Provider, kubeconfig.FromClusterName(clusterSpec.Cluster.Name))
}
// generateDefaultBundleConfig builds a support bundle definition without a
// cluster spec, using the default analyzers and collectors only.
func (gsbo *generateSupportBundleOptions) generateDefaultBundleConfig(ctx context.Context) (diagnostics.DiagnosticBundle, error) {
	deps, err := dependencies.NewFactory().WithFileReader().Build(ctx)
	if err != nil {
		return nil, err
	}
	defer close(ctx, deps)

	opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
		AnalyzerFactory:  diagnostics.NewAnalyzerFactory(),
		CollectorFactory: diagnostics.NewDefaultCollectorFactory(deps.FileReader),
	}
	return diagnostics.NewFactory(opts).DiagnosticBundleDefault(), nil
}
| 117 |
eks-anywhere | aws | Go | package cmd
import (
"fmt"
"log"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/validations"
)
var removeFromDefaultConfig = []string{"spec.clusterNetwork.dns"}
// generateClusterConfigCmd generates a provider-specific cluster config yaml
// for use with the create cluster command.
var generateClusterConfigCmd = &cobra.Command{
	Use:    "clusterconfig <cluster-name> (max 80 chars)",
	Short:  "Generate cluster config",
	Long:   "This command is used to generate a cluster config yaml for the create cluster command",
	PreRun: preRunGenerateClusterConfig,
	RunE: func(cmd *cobra.Command, args []string) error {
		// The single positional argument is the cluster name.
		clusterName, err := validations.ValidateClusterNameArg(args)
		if err != nil {
			return err
		}
		err = generateClusterConfig(clusterName)
		if err != nil {
			return fmt.Errorf("generating eks-a cluster config: %v", err) // need to have better error handling here in own func
		}
		return nil
	},
}
// preRunGenerateClusterConfig binds every command flag to viper before the
// command runs. A bind failure terminates the process via log.Fatalf.
func preRunGenerateClusterConfig(cmd *cobra.Command, args []string) {
	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
		err := viper.BindPFlag(flag.Name, flag)
		if err != nil {
			log.Fatalf("initializing flags: %v", err)
		}
	})
}
// init attaches the clusterconfig command and its required --provider flag.
func init() {
	generateCmd.AddCommand(generateClusterConfigCmd)
	generateClusterConfigCmd.Flags().StringP("provider", "p", "", "Provider to use (vsphere or tinkerbell or docker)")
	err := generateClusterConfigCmd.MarkFlagRequired("provider")
	if err != nil {
		log.Fatalf("marking flag as required: %v", err)
	}
}
// generateClusterConfig builds and prints a cluster config yaml for the
// provider selected via the --provider flag. Per provider it assembles a
// datacenter config, zero or more machine configs, and cluster-level options
// (control plane / worker / etcd counts), then prints the combined yaml
// documents to stdout.
func generateClusterConfig(clusterName string) error {
	var resources [][]byte
	var datacenterYaml []byte
	var machineGroupYaml [][]byte
	var clusterConfigOpts []v1alpha1.ClusterGenerateOpt
	switch strings.ToLower(viper.GetString("provider")) {
	case constants.DockerProviderName:
		// Docker: no machine configs; single-node CP/etcd/worker defaults.
		datacenterConfig := v1alpha1.NewDockerDatacenterConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.ControlPlaneConfigCount(1),
			v1alpha1.ExternalETCDConfigCount(1),
			v1alpha1.WorkerNodeConfigCount(1),
			v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
		)
		dcyaml, err := yaml.Marshal(datacenterConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		datacenterYaml = dcyaml
	case constants.VSphereProviderName:
		// vSphere: CP, worker, and external etcd machine configs.
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
		datacenterConfig := v1alpha1.NewVSphereDatacenterConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.ControlPlaneConfigCount(2),
			v1alpha1.ExternalETCDConfigCount(3),
			v1alpha1.WorkerNodeConfigCount(2),
			v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
		)
		dcyaml, err := yaml.Marshal(datacenterConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		datacenterYaml = dcyaml
		// need to default control plane config name to something different from the cluster name based on assumption
		// in controller code
		cpMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
		workerMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(clusterName)
		etcdMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetEtcdNodeName(clusterName))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
			v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
			v1alpha1.WithEtcdMachineGroupRef(etcdMachineConfig),
		)
		cpMcYaml, err := yaml.Marshal(cpMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		workerMcYaml, err := yaml.Marshal(workerMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		etcdMcYaml, err := yaml.Marshal(etcdMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml, etcdMcYaml)
	case constants.SnowProviderName:
		// Snow: CP and worker machine configs; stacked etcd (no external etcd count).
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
		datacenterConfig := v1alpha1.NewSnowDatacenterConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.ControlPlaneConfigCount(3),
			v1alpha1.WorkerNodeConfigCount(3),
			v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
		)
		dcyaml, err := yaml.Marshal(datacenterConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		datacenterYaml = dcyaml
		cpMachineConfig := v1alpha1.NewSnowMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
		workerMachineConfig := v1alpha1.NewSnowMachineConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
			v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
		)
		cpMcYaml, err := yaml.Marshal(cpMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		workerMcYaml, err := yaml.Marshal(workerMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml)
	case constants.CloudStackProviderName:
		// CloudStack: CP, worker, and external etcd machine configs.
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
		datacenterConfig := v1alpha1.NewCloudStackDatacenterConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.ControlPlaneConfigCount(2),
			v1alpha1.ExternalETCDConfigCount(3),
			v1alpha1.WorkerNodeConfigCount(2),
			v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
		)
		dcyaml, err := yaml.Marshal(datacenterConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		datacenterYaml = dcyaml
		// need to default control plane config name to something different from the cluster name based on assumption
		// in controller code
		cpMachineConfig := v1alpha1.NewCloudStackMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
		workerMachineConfig := v1alpha1.NewCloudStackMachineConfigGenerate(clusterName)
		etcdMachineConfig := v1alpha1.NewCloudStackMachineConfigGenerate(providers.GetEtcdNodeName(clusterName))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
			v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
			v1alpha1.WithEtcdMachineGroupRef(etcdMachineConfig),
		)
		cpMcYaml, err := yaml.Marshal(cpMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		workerMcYaml, err := yaml.Marshal(workerMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		etcdMcYaml, err := yaml.Marshal(etcdMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml, etcdMcYaml)
	case constants.TinkerbellProviderName:
		// Tinkerbell: CP and worker machine configs; single-node defaults.
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
		datacenterConfig := v1alpha1.NewTinkerbellDatacenterConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.ControlPlaneConfigCount(1),
			v1alpha1.WorkerNodeConfigCount(1),
			v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
		)
		dcyaml, err := yaml.Marshal(datacenterConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		datacenterYaml = dcyaml
		cpMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
		workerMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName)
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
			v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
		)
		cpMcYaml, err := yaml.Marshal(cpMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		workerMcYaml, err := yaml.Marshal(workerMachineConfig)
		if err != nil {
			return fmt.Errorf("generating cluster yaml: %v", err)
		}
		machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml)
	case constants.NutanixProviderName:
		// Nutanix: CP and worker machine configs.
		datacenterConfig := v1alpha1.NewNutanixDatacenterConfigGenerate(clusterName)
		dcYaml, err := yaml.Marshal(datacenterConfig)
		if err != nil {
			return fmt.Errorf("failed to generate cluster yaml: %v", err)
		}
		datacenterYaml = dcYaml
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
		clusterConfigOpts = append(clusterConfigOpts,
			v1alpha1.ControlPlaneConfigCount(3),
			v1alpha1.WorkerNodeConfigCount(3),
			v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
		)
		cpMachineConfig := v1alpha1.NewNutanixMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
		cpMcYaml, err := yaml.Marshal(cpMachineConfig)
		if err != nil {
			return fmt.Errorf("failed to generate cluster yaml: %v", err)
		}
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithCPMachineGroupRef(cpMachineConfig))
		workerMachineConfig := v1alpha1.NewNutanixMachineConfigGenerate(clusterName)
		workerMcYaml, err := yaml.Marshal(workerMachineConfig)
		if err != nil {
			return fmt.Errorf("failed to generate cluster yaml: %v", err)
		}
		clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig))
		machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml)
	default:
		return fmt.Errorf("not a valid provider")
	}
	// Assemble the cluster object last so it references all machine groups.
	config := v1alpha1.NewClusterGenerate(clusterName, clusterConfigOpts...)

	configMarshal, err := yaml.Marshal(config)
	if err != nil {
		return fmt.Errorf("generating cluster yaml: %v", err)
	}
	// Strip defaulted paths (e.g. DNS) that should not appear in the output.
	clusterYaml, err := api.CleanupPathsFromYaml(configMarshal, removeFromDefaultConfig)
	if err != nil {
		return fmt.Errorf("cleaning up paths from yaml: %v", err)
	}
	resources = append(resources, clusterYaml, datacenterYaml)

	if len(machineGroupYaml) > 0 {
		resources = append(resources, machineGroupYaml...)
	}

	fmt.Println(string(templater.AppendYamlResources(resources...)))
	return nil
}
| 265 |
eks-anywhere | aws | Go | package cmd
import (
"bufio"
"fmt"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
// hardwareOptions holds flag values for the generate hardware command.
type hardwareOptions struct {
	// csvPath is the input hardware CSV file path.
	csvPath string
	// outputPath is the output YAML path; empty means stdout.
	outputPath string
}
// hOpts holds the parsed flag values for the generate hardware command.
var hOpts = &hardwareOptions{}

// generateHardwareCmd converts a hardware CSV into Kubernetes hardware
// YAML manifests.
var generateHardwareCmd = &cobra.Command{
	Use:   "hardware",
	Short: "Generate hardware files",
	Long: `
Generate Kubernetes hardware YAML manifests for each Hardware entry in the source.
`,
	RunE: hOpts.generateHardware,
}
// init attaches the hardware command, its --output flag, and the required
// hardware CSV flag.
func init() {
	generateCmd.AddCommand(generateHardwareCmd)

	flags := generateHardwareCmd.Flags()
	flags.StringVarP(&hOpts.outputPath, "output", "o", "", "Path to output hardware YAML.")
	flags.StringVarP(
		&hOpts.csvPath,
		TinkerbellHardwareCSVFlagName,
		TinkerbellHardwareCSVFlagAlias,
		"",
		TinkerbellHardwareCSVFlagDescription,
	)
	if err := generateHardwareCmd.MarkFlagRequired(TinkerbellHardwareCSVFlagName); err != nil {
		panic(err)
	}
}
// generateHardware builds hardware YAML from the CSV at hOpts.csvPath and
// writes it to hOpts.outputPath (or stdout when the path is empty).
func (hOpts *hardwareOptions) generateHardware(cmd *cobra.Command, args []string) error {
	hardwareYaml, err := hardware.BuildHardwareYAML(hOpts.csvPath)
	if err != nil {
		return fmt.Errorf("building hardware yaml from csv: %v", err)
	}

	fh, err := hardware.CreateOrStdout(hOpts.outputPath)
	if err != nil {
		return err
	}

	bufferedWriter := bufio.NewWriter(fh)
	if _, err := bufferedWriter.Write(hardwareYaml); err != nil {
		return fmt.Errorf("writing hardware yaml to output: %v", err)
	}

	// Fix: flush explicitly and propagate the error. The previous deferred
	// Flush silently discarded buffered-write failures (e.g. disk full).
	if err := bufferedWriter.Flush(); err != nil {
		return fmt.Errorf("writing hardware yaml to output: %v", err)
	}

	return nil
}
| 65 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// generatePackageOptions holds flag values for the generate packages command.
type generatePackageOptions struct {
	// kubeVersion is the target Kubernetes version, format <major>.<minor>.
	kubeVersion string
	// clusterName is the cluster to generate package config for.
	clusterName string
	// registry optionally overrides the package registry.
	registry string
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig string
	// bundlesOverride optionally overrides the default Bundles manifest.
	bundlesOverride string
}

// gpOptions holds the parsed flag values for the generate packages command.
var gpOptions = &generatePackageOptions{}
// init attaches the generate packages command and registers its flags;
// --cluster is required.
func init() {
	generateCmd.AddCommand(generatePackageCommand)
	generatePackageCommand.Flags().StringVar(&gpOptions.clusterName, "cluster", "", "Name of cluster for package generation")
	generatePackageCommand.Flags().StringVar(&gpOptions.kubeVersion, "kube-version", "", "Kubernetes Version of the cluster to be used. Format <major>.<minor>")
	generatePackageCommand.Flags().StringVar(&gpOptions.registry, "registry", "", "Used to specify an alternative registry for package generation")
	generatePackageCommand.Flags().StringVar(&gpOptions.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file to use.")
	generatePackageCommand.Flags().StringVar(&gpOptions.bundlesOverride, "bundles-override", "", "Override default Bundles manifest (not recommended)")
	if err := generatePackageCommand.MarkFlagRequired("cluster"); err != nil {
		log.Fatalf("marking cluster flag as required: %s", err)
	}
}
// generatePackageCommand generates Kubernetes configuration files for the
// curated packages named as positional arguments (exactly one is required).
var generatePackageCommand = &cobra.Command{
	Use:          "packages [flags] package",
	Aliases:      []string{"package", "packages"},
	Short:        "Generate package(s) configuration",
	Long:         "Generates Kubernetes configuration files for curated packages",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE:         runGeneratePackages,
	Args: func(cmd *cobra.Command, args []string) error {
		if err := cobra.ExactArgs(1)(cmd, args); err == nil {
			return nil
		}
		// Fix: error strings are lowercase per Go convention (staticcheck ST1005).
		return fmt.Errorf("the name of the package to install must be specified as an argument")
	},
}
// runGeneratePackages validates the kube-version/cluster flag combination and
// then generates configuration for the requested packages.
func runGeneratePackages(cmd *cobra.Command, args []string) error {
	// When an explicit kube version is given, validate as if no cluster was
	// named so both flags may be set together ("allow both").
	clusterName := gpOptions.clusterName
	if gpOptions.kubeVersion != "" {
		clusterName = ""
	}
	if err := curatedpackages.ValidateKubeVersion(gpOptions.kubeVersion, clusterName); err != nil {
		return err
	}
	return generatePackages(cmd.Context(), args)
}
// generatePackages resolves the kubeconfig, fetches the latest package bundle
// for the requested kube version, generates configuration for the packages
// named in args, and writes the result to stdout.
func generatePackages(ctx context.Context, args []string) error {
	kubeConfig, err := kubeconfig.ResolveAndValidateFilename(gpOptions.kubeConfig, "")
	if err != nil {
		return err
	}
	deps, err := NewDependenciesForPackages(ctx, WithRegistryName(gpOptions.registry), WithKubeVersion(gpOptions.kubeVersion), WithMountPaths(kubeConfig), WithBundlesOverride(gpOptions.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}

	bm := curatedpackages.CreateBundleManager(deps.Logger)

	b := curatedpackages.NewBundleReader(kubeConfig, gpOptions.clusterName, deps.Kubectl, bm, deps.BundleRegistry)

	bundle, err := b.GetLatestBundle(ctx, gpOptions.kubeVersion)
	if err != nil {
		return err
	}

	packageClient := curatedpackages.NewPackageClient(
		deps.Kubectl,
		curatedpackages.WithBundle(bundle),
		curatedpackages.WithCustomPackages(args),
	)
	packages, err := packageClient.GeneratePackages(gpOptions.clusterName)
	if err != nil {
		return err
	}
	if err = packageClient.WritePackagesToStdOut(packages); err != nil {
		return err
	}
	return nil
}
| 100 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/aws/eks-anywhere/pkg/constants"
)
// getCmd groups resource display subcommands under "eksctl anywhere get".
var getCmd = &cobra.Command{
	Use:   "get",
	Short: "Get resources",
	Long:  "Use eksctl anywhere get to display one or many resources",
}

// init registers the get command group on the root command.
func init() {
	rootCmd.AddCommand(getCmd)
}
// preRunPackages binds every command flag to viper before package commands
// run. A bind failure terminates the process via log.Fatalf.
func preRunPackages(cmd *cobra.Command, args []string) error {
	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
		if err := viper.BindPFlag(flag.Name, flag); err != nil {
			log.Fatalf("Error initializing flags: %v", err)
		}
	})
	return nil
}
// getResources runs "kubectl get <resourceType>" against the cluster named by
// kubeConfig and prints the output. Resources are looked up in the base
// packages namespace, or in "<base>-<clusterName>" when clusterName is set.
// output, when non-empty, is passed to kubectl as "-o".
func getResources(ctx context.Context, resourceType, output, kubeConfig, clusterName, bundlesOverride string, args []string) error {
	deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithBundlesOverride(bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}
	kubectl := deps.Kubectl

	namespace := constants.EksaPackagesName
	if len(clusterName) > 0 {
		namespace = namespace + "-" + clusterName
	}

	params := []string{"get", resourceType, "--kubeconfig", kubeConfig, "--namespace", namespace}
	params = append(params, args...)
	if output != "" {
		params = append(params, "-o", output)
	}
	stdOut, err := kubectl.ExecuteCommand(ctx, params...)
	if err != nil {
		fmt.Print(&stdOut)
		return fmt.Errorf("kubectl execution failure: \n%v", err)
	}
	if len(stdOut.Bytes()) == 0 {
		// Fix: report the namespace actually queried (which may be the
		// cluster-specific one), not always the base packages namespace.
		fmt.Printf("No resources found in %v namespace\n", namespace)
		return nil
	}
	fmt.Print(&stdOut)
	return nil
}
| 62 |
eks-anywhere | aws | Go | package cmd
import (
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// getPackageOptions holds flag values for the get packages command.
type getPackageOptions struct {
	// output is the kubectl output format (json or yaml).
	output string
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig string
	// clusterName selects which cluster's packages to list.
	clusterName string
	// bundlesOverride optionally overrides the default Bundles manifest.
	bundlesOverride string
}

// gpo holds the parsed flag values for the get packages command.
var gpo = &getPackageOptions{}
// init attaches the get packages command and registers its flags;
// --cluster is required.
func init() {
	getCmd.AddCommand(getPackageCommand)

	getPackageCommand.Flags().StringVarP(&gpo.output, "output", "o", "",
		"Specifies the output format (valid option: json, yaml)")
	getPackageCommand.Flags().StringVar(&gpo.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file.")
	getPackageCommand.Flags().StringVar(&gpo.clusterName, "cluster", "",
		"Cluster to get list of packages.")
	getPackageCommand.Flags().StringVar(&gpo.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	if err := getPackageCommand.MarkFlagRequired("cluster"); err != nil {
		log.Fatalf("marking cluster flag as required: %s", err)
	}
}
// getPackageCommand displays the curated packages installed in a cluster.
var getPackageCommand = &cobra.Command{
	Use:          "package(s) [flags]",
	Aliases:      []string{"package", "packages"},
	Short:        "Get package(s)",
	Long:         "This command is used to display the curated packages installed in the cluster",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		kubeConfig, err := kubeconfig.ResolveAndValidateFilename(gpo.kubeConfig, "")
		if err != nil {
			return err
		}
		return getResources(cmd.Context(), "packages", gpo.output, kubeConfig, gpo.clusterName, gpo.bundlesOverride, args)
	},
}
| 53 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// getPackageBundleOptions holds flag values for the get packagebundles command.
type getPackageBundleOptions struct {
	// output is the kubectl output format (json or yaml).
	output string
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig string
	// bundlesOverride optionally overrides the default Bundles manifest.
	bundlesOverride string
}

// gpbo holds the parsed flag values for the get packagebundles command.
var gpbo = &getPackageBundleOptions{}
// init attaches the get packagebundles command and registers its flags.
func init() {
	getCmd.AddCommand(getPackageBundleCommand)

	getPackageBundleCommand.Flags().StringVarP(&gpbo.output, "output", "o", "",
		"Specifies the output format (valid option: json, yaml)")
	getPackageBundleCommand.Flags().StringVar(&gpbo.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file.")
	getPackageBundleCommand.Flags().StringVar(&gpbo.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
}
// getPackageBundleCommand displays the currently supported packagebundles.
var getPackageBundleCommand = &cobra.Command{
	Use:          "packagebundle(s) [flags]",
	Aliases:      []string{"packagebundle", "packagebundles"},
	Short:        "Get packagebundle(s)",
	Long:         "This command is used to display the currently supported packagebundles",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		kubeConfig, err := kubeconfig.ResolveAndValidateFilename(gpbo.kubeConfig, "")
		if err != nil {
			return err
		}
		// Bundles are cluster-scoped to the base namespace; no cluster name.
		return getResources(cmd.Context(), "packagebundles", gpbo.output, kubeConfig, "", gpbo.bundlesOverride, args)
	},
}
| 45 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// getPackageBundleControllerOptions holds flag values for the get
// packagebundlecontrollers command.
type getPackageBundleControllerOptions struct {
	// output is the kubectl output format (json or yaml).
	output string
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig string
	// bundlesOverride optionally overrides the default Bundles manifest.
	bundlesOverride string
}

// gpbco holds the parsed flag values for the get packagebundlecontrollers command.
var gpbco = &getPackageBundleControllerOptions{}
// init attaches the get packagebundlecontrollers command and registers its flags.
func init() {
	getCmd.AddCommand(getPackageBundleControllerCommand)

	getPackageBundleControllerCommand.Flags().StringVarP(&gpbco.output, "output",
		"o", "", "Specifies the output format (valid option: json, yaml)")
	getPackageBundleControllerCommand.Flags().StringVar(&gpbco.kubeConfig,
		"kubeconfig", "", "Path to an optional kubeconfig file.")
	getPackageBundleControllerCommand.Flags().StringVar(&gpbco.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
}
// getPackageBundleControllerCommand displays the current packagebundlecontrollers.
var getPackageBundleControllerCommand = &cobra.Command{
	Use:          "packagebundlecontroller(s) [flags]",
	Aliases:      []string{"packagebundlecontroller", "packagebundlecontrollers", "pbc"},
	Short:        "Get packagebundlecontroller(s)",
	Long:         "This command is used to display the current packagebundlecontrollers",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		kubeConfig, err := kubeconfig.ResolveAndValidateFilename(gpbco.kubeConfig, "")
		if err != nil {
			return err
		}
		// Controllers are cluster-scoped to the base namespace; no cluster name.
		return getResources(cmd.Context(), "packagebundlecontrollers", gpbco.output, kubeConfig, "", gpbco.bundlesOverride, args)
	},
}
| 45 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// importCmd represents the import command: it groups subcommands that import
// resources such as images and helm charts.
var importCmd = &cobra.Command{
	Use:   "import",
	Short: "Import resources",
	Long:  "Use eksctl anywhere import to import resources, such as images and helm charts",
}

// init registers the import command group on the root command.
func init() {
	rootCmd.AddCommand(importCmd)
}
| 17 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"log"
"path/filepath"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/cmd/eksctl-anywhere/cmd/internal/commands/artifacts"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/curatedpackages/oras"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/docker"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/helm"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/types"
)
// importImagesCmd imports images and helm charts into a registry from a
// tarball previously produced by the download images command.
var importImagesCmd = &cobra.Command{
	Use:   "images",
	Short: "Import images and charts to a registry from a tarball",
	Long: `Import all the images and helm charts necessary for EKS Anywhere clusters into a registry.
Use this command in conjunction with download images, passing it output tarball as input to this command.`,
	SilenceUsage: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		return importImagesCommand.Call(ctx)
	},
}
// init attaches the import images command and registers its flags;
// --input, --registry, and --bundles are required, and --include-packages is
// deprecated in favor of the copy packages command.
func init() {
	importCmd.AddCommand(importImagesCmd)

	importImagesCmd.Flags().StringVarP(&importImagesCommand.InputFile, "input", "i", "", "Input tarball containing all images and charts to import")
	if err := importImagesCmd.MarkFlagRequired("input"); err != nil {
		log.Fatalf("Cannot mark 'input' as required: %s", err)
	}
	importImagesCmd.Flags().StringVarP(&importImagesCommand.RegistryEndpoint, "registry", "r", "", "Registry where to import images and charts")
	if err := importImagesCmd.MarkFlagRequired("registry"); err != nil {
		log.Fatalf("Cannot mark 'registry' as required: %s", err)
	}
	importImagesCmd.Flags().StringVarP(&importImagesCommand.BundlesFile, "bundles", "b", "", "Bundles file to read artifact dependencies from")
	if err := importImagesCmd.MarkFlagRequired("bundles"); err != nil {
		log.Fatalf("Cannot mark 'bundles' as required: %s", err)
	}
	importImagesCmd.Flags().BoolVar(&importImagesCommand.includePackages, "include-packages", false, "Flag to indicate inclusion of curated packages in imported images")
	importImagesCmd.Flag("include-packages").Deprecated = "use copy packages command"
	importImagesCmd.Flags().BoolVar(&importImagesCommand.insecure, "insecure", false, "Flag to indicate skipping TLS verification while pushing helm charts and bundles")
}
// importImagesCommand holds the parsed flag values for the import images command.
var importImagesCommand = ImportImagesCommand{}

// ImportImagesCommand carries the inputs for importing images and charts
// from a tarball into a registry.
type ImportImagesCommand struct {
	// InputFile is the tarball containing the images and charts to import.
	InputFile string
	// RegistryEndpoint is the destination registry.
	RegistryEndpoint string
	// BundlesFile is the Bundles manifest to read artifact dependencies from.
	BundlesFile string
	// includePackages is deprecated; see the copy packages command.
	includePackages bool
	// insecure skips TLS verification when pushing charts and bundles.
	insecure bool
}
// Call imports all images and helm charts from the input tarball into the
// configured registry. It first pushes the EKS-A tools image (needed to run
// helm), then rebuilds the dependency set against the mirror registry and
// imports the remaining images, charts, and files.
func (c ImportImagesCommand) Call(ctx context.Context) error {
	username, password, err := config.ReadCredentials()
	if err != nil {
		return err
	}
	factory := dependencies.NewFactory()
	deps, err := factory.
		WithManifestReader().
		Build(ctx)
	if err != nil {
		return err
	}
	bundle, err := bundles.Read(deps.ManifestReader, c.BundlesFile)
	if err != nil {
		return err
	}
	artifactsFolder := "tmp-eks-a-artifacts"
	dockerClient := executables.BuildDockerExecutable()
	toolsImageFile := filepath.Join(artifactsFolder, eksaToolsImageTarFile)

	// Import the eksa tools image into the registry first, so it can be used immediately
	// after to build the helm executable
	importToolsImage := artifacts.ImportToolsImage{
		Bundles:            bundle,
		InputFile:          c.InputFile,
		TmpArtifactsFolder: artifactsFolder,
		UnPackager:         packagerForFile(c.InputFile),
		ImageMover: docker.NewImageMover(
			docker.NewDiskSource(dockerClient, toolsImageFile),
			docker.NewRegistryDestination(dockerClient, c.RegistryEndpoint),
		),
	}

	if err = importToolsImage.Run(ctx); err != nil {
		return err
	}

	// NOTE(review): cc.cloudStackDirectoriesToMount comes from create-cluster
	// options declared elsewhere in this package — confirm it is the intended
	// source of mount directories for import.
	dirsToMount, err := cc.cloudStackDirectoriesToMount()
	if err != nil {
		return err
	}

	helmOpts := []executables.HelmOpt{}
	if c.insecure {
		helmOpts = append(helmOpts, executables.WithInsecure())
	}

	// Rebuild dependencies so the helm executable runs from the freshly
	// imported tools image and resolves images via the mirror registry.
	deps, err = factory.
		WithExecutableMountDirs(dirsToMount...).
		WithRegistryMirror(&registrymirror.RegistryMirror{
			BaseRegistry: c.RegistryEndpoint,
			NamespacedRegistryMap: map[string]string{
				constants.DefaultCoreEKSARegistry:             c.RegistryEndpoint,
				constants.DefaultCuratedPackagesRegistryRegex: c.RegistryEndpoint,
			},
			Auth: false,
		}).
		UseExecutableImage(bundle.DefaultEksAToolsImage().VersionedImage()).
		WithHelm(helmOpts...).
		Build(ctx)
	if err != nil {
		return err
	}
	defer deps.Close(ctx)

	imagesFile := filepath.Join(artifactsFolder, "images.tar")
	importArtifacts := artifacts.Import{
		Reader:    deps.ManifestReader,
		Bundles:   bundle,
		ImageMover: docker.NewImageMover(
			docker.NewDiskSource(dockerClient, imagesFile),
			docker.NewRegistryDestination(dockerClient, c.RegistryEndpoint),
		),
		ChartImporter: helm.NewChartRegistryImporter(
			deps.Helm, artifactsFolder,
			c.RegistryEndpoint,
			username,
			password,
		),
		TmpArtifactsFolder: artifactsFolder,
		FileImporter:       oras.NewFileRegistryImporter(c.RegistryEndpoint, username, password, artifactsFolder),
	}

	return importArtifacts.Run(context.WithValue(ctx, types.InsecureRegistry, c.insecure))
}
| 154 |
eks-anywhere | aws | Go | package cmd
import "github.com/spf13/cobra"
// installCmd groups installation subcommands under "eksctl anywhere install".
var installCmd = &cobra.Command{
	Use:   "install",
	Short: "Install resources to the cluster",
	Long:  "Use eksctl anywhere install to install resources into a cluster",
}

// init registers the install command group on the root command.
func init() {
	rootCmd.AddCommand(installCmd)
}
| 14 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"errors"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/version"
)
// installControllerOptions holds flag values for the install
// packagecontroller command.
type installControllerOptions struct {
	// fileName is the EKS-A cluster configuration file path (required).
	fileName string
	// kubeConfig is the management cluster kubeconfig file.
	kubeConfig string
	// bundlesOverride optionally overrides the default Bundles manifest.
	bundlesOverride string
}

// ico holds the parsed flag values for the install packagecontroller command.
var ico = &installControllerOptions{}
// init wires the "packagecontroller" subcommand under "install" and registers
// its flags. The cluster configuration file flag is mandatory.
func init() {
	installCmd.AddCommand(installPackageControllerCommand)
	installPackageControllerCommand.Flags().StringVarP(&ico.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
	installPackageControllerCommand.Flags().StringVar(&ico.kubeConfig, "kubeConfig", "", "Management cluster kubeconfig file")
	installPackageControllerCommand.Flags().StringVar(&ico.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	if err := installPackageControllerCommand.MarkFlagRequired("filename"); err != nil {
		// Lowercase, un-prefixed message, matching the style used by the
		// sibling "install package" command's init.
		log.Fatalf("marking filename flag as required: %s", err)
	}
}
// installPackageControllerCommand installs the curated packages controller
// into the existing cluster described by the --filename cluster config.
var installPackageControllerCommand = &cobra.Command{
	Use:     "packagecontroller",
	Aliases: []string{"pc"},
	Short:   "Install packagecontroller on the cluster",
	// Fixed grammar/capitalization of the user-facing help text
	// ("to Install ... on to" -> "to install ... onto").
	Long:         "This command is used to install the packagecontroller onto an existing cluster",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE:         runInstallPackageController,
}
// runInstallPackageController verifies the cluster config file exists, then
// delegates the actual installation to installPackageController.
func runInstallPackageController(cmd *cobra.Command, args []string) error {
	if !validations.FileExists(ico.fileName) {
		return fmt.Errorf("the cluster config file %s does not exist", ico.fileName)
	}
	return installPackageController(cmd.Context())
}
// installPackageController installs the curated packages controller on the
// cluster described by the configured cluster spec file. It refuses to run
// when the controller already exists on a self-managed cluster, or when the
// cluster specification disables the package controller.
func installPackageController(ctx context.Context) error {
	kubeConfig := kubeconfig.FromEnvironment()

	clusterSpec, err := readAndValidateClusterSpec(ico.fileName, version.Get())
	if err != nil {
		return fmt.Errorf("the cluster config file provided is invalid: %v", err)
	}

	deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithClusterSpec(clusterSpec), WithKubeConfig(ico.kubeConfig), WithBundlesOverride(ico.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}

	controller := deps.PackageControllerClient
	if clusterSpec.Cluster.IsSelfManaged() && controller.IsInstalled(ctx) {
		return errors.New("curated Packages controller exists in the current cluster")
	}
	if curatedpackages.IsPackageControllerDisabled(clusterSpec.Cluster) {
		return errors.New("package controller disabled in cluster specification")
	}

	curatedpackages.PrintLicense()
	return controller.Enable(ctx)
}
| 85 |
eks-anywhere | aws | Go | package cmd
import (
"context"
"fmt"
"log"
"github.com/spf13/cobra"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
// installPackageOptions holds the flag values for the "install package" command.
type installPackageOptions struct {
	// kubeVersion is the cluster Kubernetes version, formatted <major>.<minor>.
	kubeVersion string
	// clusterName is the target cluster for the installation (required).
	clusterName string
	// packageName is the custom name to install the curated package under (required).
	packageName string
	// registry optionally points package discovery at an alternative registry.
	registry string
	// customConfigs are key:value overrides supplied via repeated --set flags.
	customConfigs []string
	// kubeConfig is an optional kubeconfig file to use when querying an
	// existing cluster.
	kubeConfig string
	// bundlesOverride optionally overrides the default Bundles manifest.
	bundlesOverride string
}
var ipo = &installPackageOptions{}
// init wires the "package" subcommand under "install" and registers its
// flags. Both the custom package name and the target cluster are mandatory.
func init() {
	installCmd.AddCommand(installPackageCommand)
	installPackageCommand.Flags().StringVar(&ipo.kubeVersion, "kube-version", "",
		"Kubernetes Version of the cluster to be used. Format <major>.<minor>")
	installPackageCommand.Flags().StringVarP(&ipo.packageName, "package-name", "n",
		"", "Custom name of the curated package to install")
	installPackageCommand.Flags().StringVar(&ipo.registry, "registry",
		"", "Used to specify an alternative registry for discovery")
	installPackageCommand.Flags().StringArrayVar(&ipo.customConfigs, "set",
		[]string{}, "Provide custom configurations for curated packages. Format key:value")
	installPackageCommand.Flags().StringVar(&ipo.kubeConfig, "kubeconfig", "",
		"Path to an optional kubeconfig file to use.")
	installPackageCommand.Flags().StringVar(&ipo.clusterName, "cluster", "",
		"Target cluster for installation.")
	installPackageCommand.Flags().StringVar(&ipo.bundlesOverride, "bundles-override", "",
		"Override default Bundles manifest (not recommended)")
	if err := installPackageCommand.MarkFlagRequired("package-name"); err != nil {
		log.Fatalf("marking package-name flag as required: %s", err)
	}
	if err := installPackageCommand.MarkFlagRequired("cluster"); err != nil {
		log.Fatalf("marking cluster flag as required: %s", err)
	}
}
// installPackageCommand installs a single curated package, named by its one
// positional argument, into the target cluster.
var installPackageCommand = &cobra.Command{
	Use:     "package [flags] package",
	Aliases: []string{"package"},
	Short:   "Install package",
	// Fixed capitalization of the user-facing help text ("to Install" -> "to install").
	Long:         "This command is used to install a curated package. Use list to discover curated packages",
	PreRunE:      preRunPackages,
	SilenceUsage: true,
	RunE:         runInstallPackages,
	// Require exactly one positional argument (the package to install),
	// replacing cobra's generic message with one that names the argument.
	// The error string is lowercase per Go convention (staticcheck ST1005);
	// the original returned a capitalized "The name..." message and used an
	// inverted err == nil check.
	Args: func(cmd *cobra.Command, args []string) error {
		if err := cobra.ExactArgs(1)(cmd, args); err != nil {
			return fmt.Errorf("the name of the package to install must be specified as an argument")
		}
		return nil
	},
}
// runInstallPackages checks the kube-version/cluster flag combination and
// then performs the package installation.
func runInstallPackages(cmd *cobra.Command, args []string) error {
	err := curatedpackages.ValidateKubeVersion(ipo.kubeVersion, ipo.clusterName)
	if err != nil {
		return err
	}
	return installPackages(cmd.Context(), args)
}
// installPackages resolves the latest curated packages bundle for the target
// cluster and installs the package named by args[0] under the configured
// custom name.
func installPackages(ctx context.Context, args []string) error {
	kubeConfig, err := kubeconfig.ResolveAndValidateFilename(ipo.kubeConfig, "")
	if err != nil {
		return err
	}

	deps, err := NewDependenciesForPackages(ctx, WithRegistryName(ipo.registry), WithKubeVersion(ipo.kubeVersion), WithMountPaths(kubeConfig), WithBundlesOverride(ipo.bundlesOverride))
	if err != nil {
		return fmt.Errorf("unable to initialize executables: %v", err)
	}

	// Locate the newest bundle compatible with the requested kube version.
	bundleManager := curatedpackages.CreateBundleManager(deps.Logger)
	bundleReader := curatedpackages.NewBundleReader(kubeConfig, ipo.clusterName, deps.Kubectl, bundleManager, deps.BundleRegistry)
	bundle, err := bundleReader.GetLatestBundle(ctx, ipo.kubeVersion)
	if err != nil {
		return err
	}

	packageClient := curatedpackages.NewPackageClient(
		deps.Kubectl,
		curatedpackages.WithBundle(bundle),
		curatedpackages.WithCustomConfigs(ipo.customConfigs),
	)
	pkg, err := packageClient.GetPackageFromBundle(args[0])
	if err != nil {
		return err
	}

	curatedpackages.PrintLicense()
	return packageClient.InstallPackage(ctx, pkg, ipo.packageName, ipo.clusterName, kubeConfig)
}
| 115 |
eks-anywhere | aws | Go | package cmd
import (
"github.com/spf13/cobra"
)
// listCmd groups the "list" subcommands under the root command.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List resources",
	Long:  "Use eksctl anywhere list to list images and artifacts used by EKS Anywhere",
}
// init registers the "list" command group on the root command.
func init() {
	rootCmd.AddCommand(listCmd)
}
| 16 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.