repo_name
stringlengths 1
52
| repo_creator
stringclasses 6
values | programming_language
stringclasses 4
values | code
stringlengths 0
9.68M
| num_lines
int64 1
234k
|
---|---|---|---|---|
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
// DeprecatedALBSecurityGroupsConfig represents security group configuration settings for an ALB.
//
// Deprecated: kept only so that old manifests still unmarshal; new manifests should
// use PublicHTTPConfig.Ingress and privateHTTPConfig.Ingress instead.
type DeprecatedALBSecurityGroupsConfig struct {
	DeprecatedIngress DeprecatedIngress `yaml:"ingress"` // Deprecated. Use PublicHTTPConfig.Ingress and privateHTTPConfig.Ingress instead.
}

// IsEmpty returns true if there are no specified fields for ingress.
func (cfg DeprecatedALBSecurityGroupsConfig) IsEmpty() bool {
	return cfg.DeprecatedIngress.IsEmpty()
}

// DeprecatedIngress represents allowed ingress traffic from specified fields.
type DeprecatedIngress struct {
	RestrictiveIngress RestrictiveIngress `yaml:"restrict_to"` // Deprecated. Superseded by PublicHTTPConfig.Ingress.
	VPCIngress         *bool              `yaml:"from_vpc"`    // Deprecated. Use privateHTTPConfig.Ingress.VPCIngress instead.
}

// IsEmpty returns true if there are no specified fields for ingress.
func (i DeprecatedIngress) IsEmpty() bool {
	return i.VPCIngress == nil && i.RestrictiveIngress.IsEmpty()
}
| 26 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package manifest provides functionality to create Manifest files.
package manifest
| 6 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"sort"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/template"
"gopkg.in/yaml.v3"
)
// Environmentmanifestinfo identifies that the type of manifest is environment manifest.
// NOTE(review): the name breaks Go MixedCaps conventions (would be EnvironmentManifestInfo),
// but it is exported and cannot be renamed without breaking callers.
const Environmentmanifestinfo = "Environment"

// environmentManifestPath is the path of the embedded template used to render an environment manifest.
var environmentManifestPath = "environment/manifest.yml"

// Error definitions.
var (
	// Returned when the "ports" field is neither a single int nor a range string.
	errUnmarshalPortsConfig = errors.New(`unable to unmarshal ports field into int or a range`)
	// Returned when the "cdn" field is neither a bool nor an advanced config map.
	errUnmarshalEnvironmentCDNConfig = errors.New(`unable to unmarshal cdn field into bool or composite-style map`)
	// Returned when "access_logs" is neither a bool nor an advanced config map.
	errUnmarshalELBAccessLogs = errors.New(`unable to unmarshal access_logs field into bool or ELB Access logs config`)
)
// Environment is the manifest configuration for an environment.
type Environment struct {
	Workload          `yaml:",inline"` // Provides the shared "name" and "type" fields.
	EnvironmentConfig `yaml:",inline"`

	parser template.Parser // Renders the manifest template in MarshalBinary.
}

// EnvironmentProps contains properties for creating a new environment manifest.
type EnvironmentProps struct {
	Name         string
	CustomConfig *config.CustomizeEnv // Optional VPC/ALB customizations carried over from `env init` flags.
	Telemetry    *config.Telemetry    // Optional observability settings.
}

// NewEnvironment creates a new environment manifest object.
func NewEnvironment(props *EnvironmentProps) *Environment {
	return FromEnvConfig(&config.Environment{
		Name:         props.Name,
		CustomConfig: props.CustomConfig,
		Telemetry:    props.Telemetry,
	}, template.New())
}
// FromEnvConfig transforms an environment configuration into a manifest.
func FromEnvConfig(cfg *config.Environment, parser template.Parser) *Environment {
	// Build each manifest section from the legacy SSM-stored configuration.
	var (
		vpcConfig environmentVPCConfig
		lbConfig  EnvironmentHTTPConfig
		obsConfig environmentObservability
	)
	vpcConfig.loadVPCConfig(cfg.CustomConfig)
	lbConfig.loadLBConfig(cfg.CustomConfig)
	obsConfig.loadObsConfig(cfg.Telemetry)

	return &Environment{
		Workload: Workload{
			Name: stringP(cfg.Name),
			Type: stringP(Environmentmanifestinfo),
		},
		EnvironmentConfig: EnvironmentConfig{
			Network:       environmentNetworkConfig{VPC: vpcConfig},
			HTTPConfig:    lbConfig,
			Observability: obsConfig,
		},
		parser: parser,
	}
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (e *Environment) MarshalBinary() ([]byte, error) {
	// Register the helper func used by the environment manifest template.
	opts := template.WithFuncs(map[string]interface{}{
		"fmtStringSlice": template.FmtSliceFunc,
	})
	content, err := e.parser.Parse(environmentManifestPath, *e, opts)
	if err != nil {
		return nil, err
	}
	return content.Bytes(), nil
}
// EnvironmentConfig defines the configuration settings for an environment manifest
type EnvironmentConfig struct {
	Network       environmentNetworkConfig `yaml:"network,omitempty,flow"`
	Observability environmentObservability `yaml:"observability,omitempty,flow"`
	HTTPConfig    EnvironmentHTTPConfig    `yaml:"http,omitempty,flow"`
	CDNConfig     EnvironmentCDNConfig     `yaml:"cdn,omitempty,flow"`
}

// IsPublicLBIngressRestrictedToCDN returns whether an environment has its
// Public Load Balancer ingress restricted to a Content Delivery Network.
func (mft *EnvironmentConfig) IsPublicLBIngressRestrictedToCDN() bool {
	// Check the fixed manifest first. This would be `http.public.ingress.cdn`.
	// For more information, see https://github.com/aws/copilot-cli/pull/4068#issuecomment-1275080333
	if !mft.HTTPConfig.Public.Ingress.IsEmpty() {
		return aws.BoolValue(mft.HTTPConfig.Public.Ingress.CDNIngress)
	}
	// Fall through to the old manifest: `http.public.security_groups.ingress.cdn`.
	return aws.BoolValue(mft.HTTPConfig.Public.DeprecatedSG.DeprecatedIngress.RestrictiveIngress.CDNIngress)
}

// GetPublicALBSourceIPs returns the `http.public.ingress.source_ips` list of IPNet.
func (mft *EnvironmentConfig) GetPublicALBSourceIPs() []IPNet {
	return mft.HTTPConfig.Public.Ingress.SourceIPs
}
// environmentNetworkConfig holds the `network` section of an environment manifest.
type environmentNetworkConfig struct {
	VPC environmentVPCConfig `yaml:"vpc,omitempty"`
}

// environmentVPCConfig holds the `network.vpc` section: either an imported VPC
// (ID + subnet IDs) or a Copilot-managed one (CIDR + subnet CIDRs), plus
// security group and flow log settings.
type environmentVPCConfig struct {
	ID                  *string                       `yaml:"id,omitempty"`
	CIDR                *IPNet                        `yaml:"cidr,omitempty"`
	Subnets             subnetsConfiguration          `yaml:"subnets,omitempty"`
	SecurityGroupConfig securityGroupConfig           `yaml:"security_group,omitempty"`
	FlowLogs            Union[*bool, VPCFlowLogsArgs] `yaml:"flow_logs,omitempty"`
}

// securityGroupConfig holds the ingress and egress rules for the environment security group.
type securityGroupConfig struct {
	Ingress []securityGroupRule `yaml:"ingress,omitempty"`
	Egress  []securityGroupRule `yaml:"egress,omitempty"`
}

// isEmpty returns true if no ingress or egress rules are configured.
func (cfg securityGroupConfig) isEmpty() bool {
	return len(cfg.Ingress) == 0 && len(cfg.Egress) == 0
}

// securityGroupRule holds the security group ingress and egress configs.
type securityGroupRule struct {
	CidrIP     string      `yaml:"cidr"`
	Ports      portsConfig `yaml:"ports"`
	IpProtocol string      `yaml:"ip_protocol"`
}

// portsConfig represents a range of ports [from:to] inclusive.
// The simple form allow represents from and to ports as a single value, whereas the advanced form is for different values.
type portsConfig struct {
	Port  *int          // 0 is a valid value, so we want the default value to be nil.
	Range *IntRangeBand // Mutually exclusive with port.
}

// IsEmpty returns whether PortsConfig is empty.
func (cfg *portsConfig) IsEmpty() bool {
	return cfg.Port == nil && cfg.Range == nil
}
// GetPorts returns the from and to ports of a security group rule.
func (r securityGroupRule) GetPorts() (from, to int, err error) {
	if r.Ports.Range != nil {
		return r.Ports.Range.Parse()
	}
	// A single value covers both ends of the range.
	port := aws.IntValue(r.Ports.Port)
	return port, port, nil
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the portsConfig
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
//
// The node is first decoded as a single int port; a YAML type error means the
// value is not an int, so it is decoded as a range string instead.
func (cfg *portsConfig) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&cfg.Port); err != nil {
		// Use errors.As (consistent with EnvironmentCDNConfig.UnmarshalYAML) instead
		// of an exact type switch, so wrapped *yaml.TypeError values are handled too.
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
		cfg.Port = nil
	}
	if cfg.Port != nil {
		// Successfully unmarshalled Port; Range is mutually exclusive, so unset it.
		cfg.Range = nil
		return nil
	}
	if err := value.Decode(&cfg.Range); err != nil {
		return errUnmarshalPortsConfig
	}
	return nil
}
// VPCFlowLogsArgs holds the flow logs configuration.
type VPCFlowLogsArgs struct {
	Retention *int `yaml:"retention,omitempty"` // Number of days to retain the logs.
}

// IsZero implements yaml.IsZeroer.
func (fl *VPCFlowLogsArgs) IsZero() bool {
	return fl.Retention == nil
}

// EnvSecurityGroup returns the security group config if the user has set any values.
// If there is no env security group settings, then returns nil and false.
func (cfg *EnvironmentConfig) EnvSecurityGroup() (*securityGroupConfig, bool) {
	if isEmpty := cfg.Network.VPC.SecurityGroupConfig.isEmpty(); !isEmpty {
		return &cfg.Network.VPC.SecurityGroupConfig, true
	}
	return nil, false
}
// EnvironmentCDNConfig represents configuration of a CDN.
type EnvironmentCDNConfig struct {
	Enabled *bool             // Simple form: `cdn: true|false`.
	Config  AdvancedCDNConfig // mutually exclusive with Enabled
}

// AdvancedCDNConfig represents an advanced configuration for a Content Delivery Network.
type AdvancedCDNConfig struct {
	Certificate  *string         `yaml:"certificate,omitempty"`
	TerminateTLS *bool           `yaml:"terminate_tls,omitempty"`
	Static       CDNStaticConfig `yaml:"static_assets,omitempty"`
}

// IsEmpty returns whether EnvironmentCDNConfig is empty.
func (cfg *EnvironmentCDNConfig) IsEmpty() bool {
	return cfg.Enabled == nil && cfg.Config.isEmpty()
}

// isEmpty returns whether AdvancedCDNConfig is empty.
func (cfg *AdvancedCDNConfig) isEmpty() bool {
	return cfg.Certificate == nil && cfg.TerminateTLS == nil && cfg.Static.IsEmpty()
}
// CDNEnabled returns whether a CDN configuration has been enabled in the environment manifest.
func (cfg *EnvironmentConfig) CDNEnabled() bool {
	if cfg.CDNConfig.Config.isEmpty() {
		// Fall back to the simple boolean form `cdn: true|false`.
		return aws.BoolValue(cfg.CDNConfig.Enabled)
	}
	// Any advanced CDN configuration implies the CDN is on.
	return true
}

// HasImportedPublicALBCerts returns true when the environment's ALB
// is configured with certs for the public listener.
func (cfg *EnvironmentConfig) HasImportedPublicALBCerts() bool {
	return cfg.HTTPConfig.Public.Certificates != nil
}

// CDNDoesTLSTermination returns true when the environment's CDN
// is configured to terminate incoming TLS connections.
func (cfg *EnvironmentConfig) CDNDoesTLSTermination() bool {
	return aws.BoolValue(cfg.CDNConfig.Config.TerminateTLS)
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the EnvironmentCDNConfig
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (cfg *EnvironmentCDNConfig) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced (map) form first. A YAML type error just means the node
	// is not a map, so fall through to the boolean form below.
	if err := value.Decode(&cfg.Config); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !cfg.Config.isEmpty() {
		// Successfully unmarshalled CDNConfig fields, return
		return nil
	}
	if err := value.Decode(&cfg.Enabled); err != nil {
		return errUnmarshalEnvironmentCDNConfig
	}
	return nil
}
// CDNStaticConfig represents the static config for CDN.
type CDNStaticConfig struct {
	Location string `yaml:"location,omitempty"`
	Alias    string `yaml:"alias,omitempty"`
	Path     string `yaml:"path,omitempty"`
}

// IsEmpty returns true if CDNStaticConfig is not configured.
func (cfg CDNStaticConfig) IsEmpty() bool {
	return cfg.Location == "" && cfg.Alias == "" && cfg.Path == ""
}
// IsEmpty returns true if environmentVPCConfig is not configured.
func (cfg environmentVPCConfig) IsEmpty() bool {
	// FIX: also account for the `security_group` field; previously a manifest that
	// configured only `network.vpc.security_group` was incorrectly reported as empty.
	return cfg.ID == nil && cfg.CIDR == nil && cfg.Subnets.IsEmpty() &&
		cfg.SecurityGroupConfig.isEmpty() && cfg.FlowLogs.IsZero()
}
// loadVPCConfig populates the VPC settings from legacy `env init` customizations.
func (cfg *environmentVPCConfig) loadVPCConfig(env *config.CustomizeEnv) {
	if env.IsEmpty() {
		return
	}
	if env.VPCConfig != nil {
		cfg.loadAdjustedVPCConfig(env.VPCConfig)
	}
	if env.ImportVPC != nil {
		cfg.loadImportedVPCConfig(env.ImportVPC)
	}
}
// loadAdjustedVPCConfig populates CIDR-based subnet settings for a Copilot-managed VPC.
// AZs are optional; when present they are assigned positionally to the subnets.
func (cfg *environmentVPCConfig) loadAdjustedVPCConfig(vpc *config.AdjustVPC) {
	cfg.CIDR = ipNetP(vpc.CIDR)
	cfg.Subnets.Public = make([]subnetConfiguration, len(vpc.PublicSubnetCIDRs))
	cfg.Subnets.Private = make([]subnetConfiguration, len(vpc.PrivateSubnetCIDRs))
	for idx, cidr := range vpc.PublicSubnetCIDRs {
		cfg.Subnets.Public[idx].CIDR = ipNetP(cidr)
		if idx < len(vpc.AZs) {
			cfg.Subnets.Public[idx].AZ = stringP(vpc.AZs[idx])
		}
	}
	for idx, cidr := range vpc.PrivateSubnetCIDRs {
		cfg.Subnets.Private[idx].CIDR = ipNetP(cidr)
		if idx < len(vpc.AZs) {
			cfg.Subnets.Private[idx].AZ = stringP(vpc.AZs[idx])
		}
	}
}

// loadImportedVPCConfig populates subnet IDs for an imported (pre-existing) VPC.
func (cfg *environmentVPCConfig) loadImportedVPCConfig(vpc *config.ImportVPC) {
	cfg.ID = stringP(vpc.ID)
	cfg.Subnets.Public = make([]subnetConfiguration, len(vpc.PublicSubnetIDs))
	cfg.Subnets.Private = make([]subnetConfiguration, len(vpc.PrivateSubnetIDs))
	for idx, id := range vpc.PublicSubnetIDs {
		cfg.Subnets.Public[idx].SubnetID = stringP(id)
	}
	for idx, id := range vpc.PrivateSubnetIDs {
		cfg.Subnets.Private[idx].SubnetID = stringP(id)
	}
}
// UnmarshalEnvironment deserializes the YAML input stream into an environment manifest object.
// If an error occurs during deserialization, then returns the error.
func UnmarshalEnvironment(in []byte) (*Environment, error) {
	mft := Environment{}
	if err := yaml.Unmarshal(in, &mft); err != nil {
		return nil, fmt.Errorf("unmarshal environment manifest: %w", err)
	}
	return &mft, nil
}
// imported returns true if the manifest references an existing VPC by ID.
func (cfg *environmentVPCConfig) imported() bool {
	return aws.StringValue(cfg.ID) != ""
}

// managedVPCCustomized returns true if a CIDR is configured for a Copilot-managed VPC.
func (cfg *environmentVPCConfig) managedVPCCustomized() bool {
	return aws.StringValue((*string)(cfg.CIDR)) != ""
}
// ImportedVPC returns configurations that import VPC resources if there is any.
func (cfg *environmentVPCConfig) ImportedVPC() *template.ImportVPC {
	if !cfg.imported() {
		return nil
	}
	// Collect the raw subnet IDs for each tier.
	collectIDs := func(subnets []subnetConfiguration) []string {
		var ids []string
		for _, subnet := range subnets {
			ids = append(ids, aws.StringValue(subnet.SubnetID))
		}
		return ids
	}
	return &template.ImportVPC{
		ID:               aws.StringValue(cfg.ID),
		PublicSubnetIDs:  collectIDs(cfg.Subnets.Public),
		PrivateSubnetIDs: collectIDs(cfg.Subnets.Private),
	}
}
// ManagedVPC returns configurations that configure VPC resources if there is any.
func (cfg *environmentVPCConfig) ManagedVPC() *template.ManagedVPC {
	// ASSUMPTION: If the VPC is configured, both pub and private are explicitly configured.
	// az is optional. However, if it's configured, it is configured for all subnets.
	// In summary:
	// 0 = #pub = #priv = #azs (not managed)
	// #pub = #priv, #azs = 0 (managed, without configured azs)
	// #pub = #priv = #azs (managed, all configured)
	if !cfg.managedVPCCustomized() {
		return nil
	}
	publicSubnetCIDRs := make([]string, len(cfg.Subnets.Public))
	// FIX: size the private slice from the private subnets; it was previously sized
	// from the public subnets and only worked because of the equal-length assumption.
	privateSubnetCIDRs := make([]string, len(cfg.Subnets.Private))
	var azs []string
	// NOTE: sort based on `az`s to preserve the mappings between azs and public subnets, private subnets.
	// For example, if we have two subnets defined: public-subnet-1 ~ us-east-1a, and private-subnet-1 ~ us-east-1a.
	// We want to make sure that public-subnet-1, us-east-1a and private-subnet-1 are all at index 0 of in perspective lists.
	sort.SliceStable(cfg.Subnets.Public, func(i, j int) bool {
		return aws.StringValue(cfg.Subnets.Public[i].AZ) < aws.StringValue(cfg.Subnets.Public[j].AZ)
	})
	sort.SliceStable(cfg.Subnets.Private, func(i, j int) bool {
		return aws.StringValue(cfg.Subnets.Private[i].AZ) < aws.StringValue(cfg.Subnets.Private[j].AZ)
	})
	for idx, subnet := range cfg.Subnets.Public {
		publicSubnetCIDRs[idx] = aws.StringValue((*string)(subnet.CIDR))
		privateSubnetCIDRs[idx] = aws.StringValue((*string)(cfg.Subnets.Private[idx].CIDR))
		if az := aws.StringValue(subnet.AZ); az != "" {
			azs = append(azs, az)
		}
	}
	return &template.ManagedVPC{
		CIDR:               aws.StringValue((*string)(cfg.CIDR)),
		AZs:                azs,
		PublicSubnetCIDRs:  publicSubnetCIDRs,
		PrivateSubnetCIDRs: privateSubnetCIDRs,
	}
}
// subnetsConfiguration holds the public and private subnets of the VPC.
type subnetsConfiguration struct {
	Public  []subnetConfiguration `yaml:"public,omitempty"`
	Private []subnetConfiguration `yaml:"private,omitempty"`
}

// IsEmpty returns true if neither public subnets nor private subnets are configured.
func (cs subnetsConfiguration) IsEmpty() bool {
	return len(cs.Public) == 0 && len(cs.Private) == 0
}

// subnetConfiguration describes one subnet: either an imported subnet ID, or a
// CIDR (with an optional availability zone) for a managed subnet.
type subnetConfiguration struct {
	SubnetID *string `yaml:"id,omitempty"`
	CIDR     *IPNet  `yaml:"cidr,omitempty"`
	AZ       *string `yaml:"az,omitempty"`
}

// environmentObservability holds the `observability` section of an environment manifest.
type environmentObservability struct {
	ContainerInsights *bool `yaml:"container_insights,omitempty"`
}

// IsEmpty returns true if there is no configuration to the environment's observability.
func (o *environmentObservability) IsEmpty() bool {
	return o == nil || o.ContainerInsights == nil
}

// loadObsConfig copies the telemetry settings from the legacy environment config, if any.
func (o *environmentObservability) loadObsConfig(tele *config.Telemetry) {
	if tele == nil {
		return
	}
	o.ContainerInsights = &tele.EnableContainerInsights
}
// EnvironmentHTTPConfig defines the configuration settings for an environment group's HTTP connections.
type EnvironmentHTTPConfig struct {
	Public  PublicHTTPConfig  `yaml:"public,omitempty"`
	Private privateHTTPConfig `yaml:"private,omitempty"`
}

// IsEmpty returns true if neither the public ALB nor the internal ALB is configured.
func (cfg EnvironmentHTTPConfig) IsEmpty() bool {
	return cfg.Public.IsEmpty() && cfg.Private.IsEmpty()
}

// loadLBConfig populates the load balancer settings from legacy `env init` customizations.
// Imported certificates go to the internal ALB when a VPC is imported without public
// subnets; otherwise they go to the public ALB.
func (cfg *EnvironmentHTTPConfig) loadLBConfig(env *config.CustomizeEnv) {
	if env.IsEmpty() {
		return
	}
	if env.ImportVPC != nil && len(env.ImportVPC.PublicSubnetIDs) == 0 {
		cfg.Private.InternalALBSubnets = env.InternalALBSubnets
		cfg.Private.Certificates = env.ImportCertARNs
		if env.EnableInternalALBVPCIngress { // NOTE: Do not load the configuration unless it's positive, so that the default manifest does not contain the unnecessary line `http.private.ingress.vpc: false`.
			cfg.Private.Ingress.VPCIngress = aws.Bool(true)
		}
		return
	}
	cfg.Public.Certificates = env.ImportCertARNs
}
// PublicHTTPConfig represents the configuration settings for an environment public ALB.
type PublicHTTPConfig struct {
	DeprecatedSG  DeprecatedALBSecurityGroupsConfig `yaml:"security_groups,omitempty"` // Deprecated. This configuration is now available inside Ingress field.
	Certificates  []string                          `yaml:"certificates,omitempty"`
	ELBAccessLogs ELBAccessLogsArgsOrBool           `yaml:"access_logs,omitempty"`
	Ingress       RestrictiveIngress                `yaml:"ingress,omitempty"`
	SSLPolicy     *string                           `yaml:"ssl_policy,omitempty"`
}

// ELBAccessLogsArgsOrBool is a custom type which supports unmarshaling yaml which
// can either be of type bool or type ELBAccessLogsArgs.
type ELBAccessLogsArgsOrBool struct {
	Enabled        *bool             // Simple form: `access_logs: true|false`.
	AdvancedConfig ELBAccessLogsArgs // Advanced form; mutually exclusive with Enabled.
}

// isEmpty returns true if neither the simple nor the advanced form is set.
func (al *ELBAccessLogsArgsOrBool) isEmpty() bool {
	return al.Enabled == nil && al.AdvancedConfig.isEmpty()
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the ELBAccessLogsArgsOrBool
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
//
// The node is first decoded as the advanced (map) form; a YAML type error means
// the node is a scalar, so it is decoded as a bool instead.
func (al *ELBAccessLogsArgsOrBool) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&al.AdvancedConfig); err != nil {
		// Use errors.As (consistent with EnvironmentCDNConfig.UnmarshalYAML) instead
		// of an exact type switch with a no-op break.
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !al.AdvancedConfig.isEmpty() {
		// Unmarshaled successfully to al.AdvancedConfig; Enabled is mutually exclusive, so reset it.
		al.Enabled = nil
		return nil
	}
	if err := value.Decode(&al.Enabled); err != nil {
		return errUnmarshalELBAccessLogs
	}
	return nil
}
// ELBAccessLogsArgs holds the access logs configuration.
type ELBAccessLogsArgs struct {
	BucketName *string `yaml:"bucket_name,omitempty"`
	Prefix     *string `yaml:"prefix,omitempty"`
}

// isEmpty returns true if no advanced access log settings are configured.
func (al *ELBAccessLogsArgs) isEmpty() bool {
	return al.BucketName == nil && al.Prefix == nil
}
// ELBAccessLogs returns the access logs config if the user has set any values.
// If there is no access logs settings, then returns nil and false.
func (cfg *EnvironmentConfig) ELBAccessLogs() (*ELBAccessLogsArgs, bool) {
	logs := cfg.HTTPConfig.Public.ELBAccessLogs
	switch {
	case logs.isEmpty():
		return nil, false
	case logs.Enabled != nil:
		// Simple boolean form: no advanced config to return.
		return nil, aws.BoolValue(logs.Enabled)
	default:
		return &logs.AdvancedConfig, true
	}
}
// RestrictiveIngress represents ingress fields which restrict
// default behavior of allowing all public ingress.
type RestrictiveIngress struct {
	CDNIngress *bool   `yaml:"cdn"`
	SourceIPs  []IPNet `yaml:"source_ips"`
}

// RelaxedIngress contains ingress configuration to add to a security group.
type RelaxedIngress struct {
	VPCIngress *bool `yaml:"vpc"`
}

// IsEmpty returns true if there are no specified fields for relaxed ingress.
func (i RelaxedIngress) IsEmpty() bool {
	return i.VPCIngress == nil
}

// IsEmpty returns true if there are no specified fields for restrictive ingress.
func (i RestrictiveIngress) IsEmpty() bool {
	return i.CDNIngress == nil && len(i.SourceIPs) == 0
}

// IsEmpty returns true if there is no customization to the public ALB.
func (cfg PublicHTTPConfig) IsEmpty() bool {
	return len(cfg.Certificates) == 0 && cfg.DeprecatedSG.IsEmpty() && cfg.ELBAccessLogs.isEmpty() && cfg.Ingress.IsEmpty() && cfg.SSLPolicy == nil
}
// privateHTTPConfig represents the configuration settings for an environment internal ALB.
type privateHTTPConfig struct {
	InternalALBSubnets []string                          `yaml:"subnets,omitempty"`
	Certificates       []string                          `yaml:"certificates,omitempty"`
	DeprecatedSG       DeprecatedALBSecurityGroupsConfig `yaml:"security_groups,omitempty"` // Deprecated. This field is now available in Ingress.
	Ingress            RelaxedIngress                    `yaml:"ingress,omitempty"`
	SSLPolicy          *string                           `yaml:"ssl_policy,omitempty"`
}

// IsEmpty returns true if there is no customization to the internal ALB.
func (cfg privateHTTPConfig) IsEmpty() bool {
	return len(cfg.InternalALBSubnets) == 0 && len(cfg.Certificates) == 0 && cfg.DeprecatedSG.IsEmpty() && cfg.Ingress.IsEmpty() && cfg.SSLPolicy == nil
}

// HasVPCIngress returns true if the private ALB allows ingress from within the VPC.
// Both the current (`ingress.vpc`) and deprecated (`security_groups.ingress.from_vpc`) fields are honored.
func (cfg privateHTTPConfig) HasVPCIngress() bool {
	return aws.BoolValue(cfg.Ingress.VPCIngress) || aws.BoolValue(cfg.DeprecatedSG.DeprecatedIngress.VPCIngress)
}
| 580 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/stretchr/testify/require"
)
// TestFromEnvConfig verifies that legacy `env init` customizations stored in
// config.Environment are converted into the expected manifest fields.
func TestFromEnvConfig(t *testing.T) {
	testCases := map[string]struct {
		in     *config.Environment
		wanted *Environment
	}{
		"converts configured VPC settings with availability zones after v1.14": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					VPCConfig: &config.AdjustVPC{
						CIDR:               "10.0.0.0/16",
						AZs:                []string{"us-west-2a", "us-west-2b", "us-west-2c"},
						PublicSubnetCIDRs:  []string{"10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"},
						PrivateSubnetCIDRs: []string{"10.0.3.0/24", "10.0.4.0/24", "10.0.5.0/24"},
					},
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							CIDR: ipNetP("10.0.0.0/16"),
							Subnets: subnetsConfiguration{
								Public: []subnetConfiguration{
									{
										CIDR: ipNetP("10.0.0.0/24"),
										AZ:   stringP("us-west-2a"),
									},
									{
										CIDR: ipNetP("10.0.1.0/24"),
										AZ:   stringP("us-west-2b"),
									},
									{
										CIDR: ipNetP("10.0.2.0/24"),
										AZ:   stringP("us-west-2c"),
									},
								},
								Private: []subnetConfiguration{
									{
										CIDR: ipNetP("10.0.3.0/24"),
										AZ:   stringP("us-west-2a"),
									},
									{
										CIDR: ipNetP("10.0.4.0/24"),
										AZ:   stringP("us-west-2b"),
									},
									{
										CIDR: ipNetP("10.0.5.0/24"),
										AZ:   stringP("us-west-2c"),
									},
								},
							},
						},
					},
				},
			},
		},
		"converts configured VPC settings without any availability zones set": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					VPCConfig: &config.AdjustVPC{
						CIDR:               "10.0.0.0/16",
						PublicSubnetCIDRs:  []string{"10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"},
						PrivateSubnetCIDRs: []string{"10.0.3.0/24", "10.0.4.0/24", "10.0.5.0/24"},
					},
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							CIDR: ipNetP("10.0.0.0/16"),
							Subnets: subnetsConfiguration{
								Public: []subnetConfiguration{
									{
										CIDR: ipNetP("10.0.0.0/24"),
									},
									{
										CIDR: ipNetP("10.0.1.0/24"),
									},
									{
										CIDR: ipNetP("10.0.2.0/24"),
									},
								},
								Private: []subnetConfiguration{
									{
										CIDR: ipNetP("10.0.3.0/24"),
									},
									{
										CIDR: ipNetP("10.0.4.0/24"),
									},
									{
										CIDR: ipNetP("10.0.5.0/24"),
									},
								},
							},
						},
					},
				},
			},
		},
		"converts imported VPC settings": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					ImportVPC: &config.ImportVPC{
						ID:               "vpc-3f139646",
						PublicSubnetIDs:  []string{"pub1", "pub2", "pub3"},
						PrivateSubnetIDs: []string{"priv1", "priv2", "priv3"},
					},
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							ID: stringP("vpc-3f139646"),
							Subnets: subnetsConfiguration{
								Public: []subnetConfiguration{
									{
										SubnetID: stringP("pub1"),
									},
									{
										SubnetID: stringP("pub2"),
									},
									{
										SubnetID: stringP("pub3"),
									},
								},
								Private: []subnetConfiguration{
									{
										SubnetID: stringP("priv1"),
									},
									{
										SubnetID: stringP("priv2"),
									},
									{
										SubnetID: stringP("priv3"),
									},
								},
							},
						},
					},
				},
			},
		},
		// Certs land on the public ALB because the imported VPC has public subnets.
		"converts imported certificates for a public load balancer": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					ImportCertARNs: []string{"arn:aws:acm:region:account:certificate/certificate_ID_1", "arn:aws:acm:region:account:certificate/certificate_ID_2"},
					ImportVPC: &config.ImportVPC{
						PublicSubnetIDs: []string{"subnet1"},
					},
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							Subnets: subnetsConfiguration{
								Public: []subnetConfiguration{
									{
										SubnetID: aws.String("subnet1"),
										CIDR:     nil,
										AZ:       nil,
									},
								},
								Private: []subnetConfiguration{},
							},
						},
					},
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"arn:aws:acm:region:account:certificate/certificate_ID_1", "arn:aws:acm:region:account:certificate/certificate_ID_2"},
						},
					},
				},
			},
		},
		"converts imported certificates for a public load balancer without an imported vpc": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					ImportCertARNs: []string{"arn:aws:acm:region:account:certificate/certificate_ID_1", "arn:aws:acm:region:account:certificate/certificate_ID_2"},
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"arn:aws:acm:region:account:certificate/certificate_ID_1", "arn:aws:acm:region:account:certificate/certificate_ID_2"},
						},
					},
				},
			},
		},
		// No public subnets in the imported VPC, so certs go to the internal ALB.
		"converts imported certificates for a private load balancer with subnet placement specified": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					ImportCertARNs: []string{"arn:aws:acm:region:account:certificate/certificate_ID_1", "arn:aws:acm:region:account:certificate/certificate_ID_2"},
					ImportVPC: &config.ImportVPC{
						PrivateSubnetIDs: []string{"subnet1", "subnet2"},
					},
					InternalALBSubnets: []string{"subnet2"},
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							Subnets: subnetsConfiguration{
								Private: []subnetConfiguration{
									{
										SubnetID: aws.String("subnet1"),
									},
									{
										SubnetID: aws.String("subnet2"),
									},
								},
								Public: []subnetConfiguration{},
							},
						},
					},
					HTTPConfig: EnvironmentHTTPConfig{
						Private: privateHTTPConfig{
							InternalALBSubnets: []string{"subnet2"},
							Certificates:       []string{"arn:aws:acm:region:account:certificate/certificate_ID_1", "arn:aws:acm:region:account:certificate/certificate_ID_2"},
						},
					},
				},
			},
		},
		"converts container insights": {
			in: &config.Environment{
				App:  "phonetool",
				Name: "test",
				Telemetry: &config.Telemetry{
					EnableContainerInsights: false,
				},
			},
			wanted: &Environment{
				Workload: Workload{
					Name: stringP("test"),
					Type: stringP("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Observability: environmentObservability{
						ContainerInsights: aws.Bool(false),
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, FromEnvConfig(tc.in, nil))
		})
	}
}
// Test_UnmarshalEnvironment verifies YAML deserialization of environment
// manifests, including the simple/advanced dual forms (cdn, access_logs) and
// both the deprecated and current http ingress fields.
// NOTE(review): the raw-string YAML indentation below was reconstructed to
// match the expected structs — confirm against the original source.
func Test_UnmarshalEnvironment(t *testing.T) {
	var (
		mockVPCCIDR            = IPNet("10.0.0.0/16")
		mockPublicSubnet1CIDR  = IPNet("10.0.0.0/24")
		mockPublicSubnet2CIDR  = IPNet("10.0.1.0/24")
		mockPrivateSubnet1CIDR = IPNet("10.0.3.0/24")
		mockPrivateSubnet2CIDR = IPNet("10.0.4.0/24")
	)
	testCases := map[string]struct {
		inContent       string
		wantedStruct    *Environment
		wantedErrPrefix string
	}{
		"unmarshal with managed VPC": {
			inContent: `name: test
type: Environment
network:
  vpc:
    cidr: '10.0.0.0/16'
    subnets:
      public:
        - cidr: '10.0.0.0/24'
          az: 'us-east-2a'
        - cidr: '10.0.1.0/24'
          az: 'us-east-2b'
      private:
        - cidr: '10.0.3.0/24'
          az: 'us-east-2a'
        - cidr: '10.0.4.0/24'
          az: 'us-east-2b'
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("test"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							CIDR: &mockVPCCIDR,
							Subnets: subnetsConfiguration{
								Public: []subnetConfiguration{
									{
										CIDR: &mockPublicSubnet1CIDR,
										AZ:   aws.String("us-east-2a"),
									},
									{
										CIDR: &mockPublicSubnet2CIDR,
										AZ:   aws.String("us-east-2b"),
									},
								},
								Private: []subnetConfiguration{
									{
										CIDR: &mockPrivateSubnet1CIDR,
										AZ:   aws.String("us-east-2a"),
									},
									{
										CIDR: &mockPrivateSubnet2CIDR,
										AZ:   aws.String("us-east-2b"),
									},
								},
							},
						},
					},
				},
			},
		},
		"unmarshal with enable access logs": {
			inContent: `name: prod
type: Environment
http:
  public:
    access_logs: true`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							ELBAccessLogs: ELBAccessLogsArgsOrBool{
								Enabled: aws.Bool(true),
							},
						},
					},
				},
			},
		},
		"unmarshal with advanced access logs": {
			inContent: `name: prod
type: Environment
http:
  public:
    access_logs:
      bucket_name: testbucket
      prefix: prefix`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							ELBAccessLogs: ELBAccessLogsArgsOrBool{
								AdvancedConfig: ELBAccessLogsArgs{
									Prefix:     aws.String("prefix"),
									BucketName: aws.String("testbucket"),
								},
							},
						},
					},
				},
			},
		},
		"unmarshal with observability": {
			inContent: `name: prod
type: Environment
observability:
  container_insights: true
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					Observability: environmentObservability{
						ContainerInsights: aws.Bool(true),
					},
				},
			},
		},
		"unmarshal with content delivery network bool": {
			inContent: `name: prod
type: Environment
cdn: true
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					CDNConfig: EnvironmentCDNConfig{
						Enabled: aws.Bool(true),
					},
				},
			},
		},
		"unmarshal with http": {
			inContent: `name: prod
type: Environment
http:
  public:
    certificates:
      - cert-1
      - cert-2
  private:
    security_groups:
      ingress:
        from_vpc: false
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"cert-1", "cert-2"},
						},
						Private: privateHTTPConfig{
							DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
								DeprecatedIngress: DeprecatedIngress{
									VPCIngress: aws.Bool(false),
								},
							},
						},
					},
				},
			},
		},
		"unmarshal with new http fields": {
			inContent: `name: prod
type: Environment
http:
  public:
    certificates:
      - cert-1
      - cert-2
  private:
    ingress:
      vpc: true
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"cert-1", "cert-2"},
						},
						Private: privateHTTPConfig{
							Ingress: RelaxedIngress{VPCIngress: aws.Bool(true)},
						},
					},
				},
			},
		},
		// Both the deprecated and current fields may coexist; both are populated.
		"unmarshal with new and old private http fields": {
			inContent: `name: prod
type: Environment
http:
  public:
    certificates:
      - cert-1
      - cert-2
  private:
    security_groups:
      ingress:
        from_vpc: true
    ingress:
      vpc: true
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"cert-1", "cert-2"},
						},
						Private: privateHTTPConfig{
							Ingress: RelaxedIngress{VPCIngress: aws.Bool(true)},
							DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
								DeprecatedIngress: DeprecatedIngress{
									VPCIngress: aws.Bool(true),
								},
							},
						},
					},
				},
			},
		},
		"unmarshal with new and old public http fields": {
			inContent: `name: prod
type: Environment
http:
  public:
    certificates:
      - cert-1
      - cert-2
    security_groups:
      ingress:
        restrict_to:
          cdn: true
    ingress:
      cdn: true
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"cert-1", "cert-2"},
							DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
								DeprecatedIngress: DeprecatedIngress{
									RestrictiveIngress: RestrictiveIngress{
										CDNIngress: aws.Bool(true),
									},
								},
							},
							Ingress: RestrictiveIngress{CDNIngress: aws.Bool(true)},
						},
					},
				},
			},
		},
		"unmarshal with source_ips field in http.public": {
			inContent: `name: prod
type: Environment
http:
  public:
    certificates:
      - cert-1
      - cert-2
    security_groups:
      ingress:
        restrict_to:
          cdn: true
    ingress:
      source_ips:
        - 1.1.1.1
        - 2.2.2.2
`,
			wantedStruct: &Environment{
				Workload: Workload{
					Name: aws.String("prod"),
					Type: aws.String("Environment"),
				},
				EnvironmentConfig: EnvironmentConfig{
					HTTPConfig: EnvironmentHTTPConfig{
						Public: PublicHTTPConfig{
							Certificates: []string{"cert-1", "cert-2"},
							DeprecatedSG: DeprecatedALBSecurityGroupsConfig{DeprecatedIngress: DeprecatedIngress{RestrictiveIngress: RestrictiveIngress{CDNIngress: aws.Bool(true)}}},
							Ingress:      RestrictiveIngress{SourceIPs: []IPNet{"1.1.1.1", "2.2.2.2"}},
						},
					},
				},
			},
		},
		"fail to unmarshal": {
			inContent:       `watermelon in easter hay`,
			wantedErrPrefix: "unmarshal environment manifest: ",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got, gotErr := UnmarshalEnvironment([]byte(tc.inContent))
			if tc.wantedErrPrefix != "" {
				require.ErrorContains(t, gotErr, tc.wantedErrPrefix)
			} else {
				require.NoError(t, gotErr)
				require.Equal(t, tc.wantedStruct, got)
			}
		})
	}
}
// TestEnvironmentVPCConfig_ImportedVPC verifies that ImportedVPC maps an
// imported VPC ID and its public/private subnet IDs onto *template.ImportVPC,
// and yields nil when no VPC ID is configured.
func TestEnvironmentVPCConfig_ImportedVPC(t *testing.T) {
	testCases := map[string]struct {
		inVPCConfig environmentVPCConfig
		wanted      *template.ImportVPC
	}{
		// Zero-value config: nothing is imported, so the result is nil.
		"vpc not imported": {},
		"only public subnets imported": {
			inVPCConfig: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							SubnetID: aws.String("subnet-123"),
						},
						{
							SubnetID: aws.String("subnet-456"),
						},
					},
				},
			},
			wanted: &template.ImportVPC{
				ID:              "vpc-1234",
				PublicSubnetIDs: []string{"subnet-123", "subnet-456"},
			},
		},
		"only private subnets imported": {
			inVPCConfig: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Private: []subnetConfiguration{
						{
							SubnetID: aws.String("subnet-123"),
						},
						{
							SubnetID: aws.String("subnet-456"),
						},
					},
				},
			},
			wanted: &template.ImportVPC{
				ID:               "vpc-1234",
				PrivateSubnetIDs: []string{"subnet-123", "subnet-456"},
			},
		},
		"both subnets imported": {
			inVPCConfig: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							SubnetID: aws.String("subnet-123"),
						},
						{
							SubnetID: aws.String("subnet-456"),
						},
					},
					Private: []subnetConfiguration{
						{
							SubnetID: aws.String("subnet-789"),
						},
						{
							SubnetID: aws.String("subnet-012"),
						},
					},
				},
			},
			wanted: &template.ImportVPC{
				ID:               "vpc-1234",
				PublicSubnetIDs:  []string{"subnet-123", "subnet-456"},
				PrivateSubnetIDs: []string{"subnet-789", "subnet-012"},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got := tc.inVPCConfig.ImportedVPC()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestEnvironmentVPCConfig_ManagedVPC verifies that ManagedVPC renders a
// Copilot-managed VPC template: CIDRs are sorted by AZ when AZs are set,
// and left in declaration order when no AZs are configured.
func TestEnvironmentVPCConfig_ManagedVPC(t *testing.T) {
	var (
		mockVPCCIDR            = IPNet("10.0.0.0/16")
		mockPublicSubnet1CIDR  = IPNet("10.0.0.0/24")
		mockPublicSubnet2CIDR  = IPNet("10.0.1.0/24")
		mockPublicSubnet3CIDR  = IPNet("10.0.2.0/24")
		mockPrivateSubnet1CIDR = IPNet("10.0.3.0/24")
		mockPrivateSubnet2CIDR = IPNet("10.0.4.0/24")
		mockPrivateSubnet3CIDR = IPNet("10.0.5.0/24")
	)
	testCases := map[string]struct {
		inVPCConfig environmentVPCConfig
		wanted      *template.ManagedVPC
	}{
		// No custom config and an imported VPC both yield nil.
		"default vpc without custom configuration": {},
		"with imported vpc": {
			inVPCConfig: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
			},
		},
		"ensure custom configuration is sorted by AZ": {
			inVPCConfig: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPublicSubnet3CIDR,
							AZ:   aws.String("us-east-2c"),
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
						{
							CIDR: &mockPrivateSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPrivateSubnet3CIDR,
							AZ:   aws.String("us-east-2c"),
						},
					},
				},
			},
			wanted: &template.ManagedVPC{
				CIDR:               string(mockVPCCIDR),
				AZs:                []string{"us-east-2a", "us-east-2b", "us-east-2c"},
				PublicSubnetCIDRs:  []string{string(mockPublicSubnet1CIDR), string(mockPublicSubnet2CIDR), string(mockPublicSubnet3CIDR)},
				PrivateSubnetCIDRs: []string{string(mockPrivateSubnet1CIDR), string(mockPrivateSubnet2CIDR), string(mockPrivateSubnet3CIDR)},
			},
		},
		// Without AZs there is no sort key, so declaration order is preserved.
		"managed vpc without explicitly configured azs": {
			inVPCConfig: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
						},
						{
							CIDR: &mockPublicSubnet3CIDR,
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet2CIDR,
						},
						{
							CIDR: &mockPrivateSubnet1CIDR,
						},
						{
							CIDR: &mockPrivateSubnet3CIDR,
						},
					},
				},
			},
			wanted: &template.ManagedVPC{
				CIDR:               string(mockVPCCIDR),
				PublicSubnetCIDRs:  []string{string(mockPublicSubnet1CIDR), string(mockPublicSubnet3CIDR), string(mockPublicSubnet2CIDR)},
				PrivateSubnetCIDRs: []string{string(mockPrivateSubnet2CIDR), string(mockPrivateSubnet1CIDR), string(mockPrivateSubnet3CIDR)},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got := tc.inVPCConfig.ManagedVPC()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestEnvironmentVPCConfig_IsEmpty verifies environmentVPCConfig.IsEmpty.
func TestEnvironmentVPCConfig_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		in     environmentVPCConfig
		wanted bool
	}{
		"empty": {
			wanted: true,
		},
		"not empty when VPC ID is provided": {
			in: environmentVPCConfig{
				ID: aws.String("mock-vpc-id"),
			},
		},
		// NOTE(review): the two cases below are named "not empty ..." yet
		// expect IsEmpty() == true. Either the case names or the wanted
		// values appear inconsistent — confirm whether
		// environmentVPCConfig.IsEmpty intentionally ignores FlowLogs.
		"not empty when flowlog is on": {
			in: environmentVPCConfig{
				FlowLogs: Union[*bool, VPCFlowLogsArgs]{
					Basic: aws.Bool(true),
				},
			},
			wanted: true,
		},
		"not empty when flowlog with specific retention": {
			in: environmentVPCConfig{
				FlowLogs: Union[*bool, VPCFlowLogsArgs]{
					Advanced: VPCFlowLogsArgs{
						Retention: aws.Int(60),
					},
				},
			},
			wanted: true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got := tc.in.IsEmpty()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestSubnetsConfiguration_IsEmpty verifies that subnetsConfiguration.IsEmpty
// reports true only when neither public nor private subnets are configured.
func TestSubnetsConfiguration_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     subnetsConfiguration
		wanted bool
	}{
		"empty": {
			wanted: true,
		},
		"not empty": {
			in: subnetsConfiguration{
				Public: []subnetConfiguration{
					{
						SubnetID: aws.String("mock-subnet-id"),
					},
				},
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestCDNStaticConfig_IsEmpty verifies that CDNStaticConfig.IsEmpty reports
// true only for the zero value.
func TestCDNStaticConfig_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     CDNStaticConfig
		wanted bool
	}{
		"empty": {
			wanted: true,
		},
		"not empty": {
			in: CDNStaticConfig{
				Path: "something",
			},
			wanted: false,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestEnvironmentHTTPConfig_IsEmpty verifies that EnvironmentHTTPConfig.IsEmpty
// reports true only when no public or private HTTP settings are present.
func TestEnvironmentHTTPConfig_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     EnvironmentHTTPConfig
		wanted bool
	}{
		"empty": {
			wanted: true,
		},
		"not empty": {
			in: EnvironmentHTTPConfig{
				Public: PublicHTTPConfig{
					Certificates: []string{"mock-cert"},
				},
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestPublicHTTPConfig_IsEmpty verifies that PublicHTTPConfig.IsEmpty reports
// true only when no public-ALB setting is configured.
func TestPublicHTTPConfig_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     PublicHTTPConfig
		wanted bool
	}{
		"empty": {
			wanted: true,
		},
		"not empty when Certificates are attached": {
			in: PublicHTTPConfig{
				Certificates: []string{"mock-cert-1"},
			},
		},
		"not empty when SSL Policy is present": {
			in: PublicHTTPConfig{
				SSLPolicy: aws.String("mock-ELB-ELBSecurityPolicy"),
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestPrivateHTTPConfig_IsEmpty verifies that privateHTTPConfig.IsEmpty
// reports true only when no private-ALB setting is configured.
func TestPrivateHTTPConfig_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		in     privateHTTPConfig
		wanted bool
	}{
		"empty": {
			wanted: true,
		},
		// Case name fixed: this case sets InternalALBSubnets, not
		// certificates (name was copy-pasted from TestPublicHTTPConfig_IsEmpty).
		"not empty when InternalALBSubnets are specified": {
			in: privateHTTPConfig{
				InternalALBSubnets: []string{"mock-subnet-1"},
			},
		},
		"not empty when SSL Policy is present": {
			in: privateHTTPConfig{
				SSLPolicy: aws.String("mock-ELB-ELBSecurityPolicy"),
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got := tc.in.IsEmpty()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestEnvironmentObservability_IsEmpty verifies that
// environmentObservability.IsEmpty reports true only for the zero value;
// an explicit false for ContainerInsights still counts as configured.
func TestEnvironmentObservability_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     environmentObservability
		wanted bool
	}{
		"empty": {
			in:     environmentObservability{},
			wanted: true,
		},
		"not empty": {
			in: environmentObservability{
				ContainerInsights: aws.Bool(false),
			},
			wanted: false,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestEnvironmentCDNConfig_IsEmpty verifies EnvironmentCDNConfig.IsEmpty:
// both the boolean shorthand and the advanced config count as non-empty.
func TestEnvironmentCDNConfig_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		in     EnvironmentCDNConfig
		wanted bool
	}{
		"empty": {
			in:     EnvironmentCDNConfig{},
			wanted: true,
		},
		// An explicit `cdn: false` is still a configured value.
		"not empty": {
			in: EnvironmentCDNConfig{
				Enabled: aws.Bool(false),
			},
			wanted: false,
		},
		"advanced not empty": {
			in: EnvironmentCDNConfig{
				Config: AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
				},
			},
			wanted: false,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got := tc.in.IsEmpty()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestEnvironmentConfig_CDNEnabled verifies CDNEnabled: true when enabled via
// the boolean shorthand or implied by advanced config; false otherwise.
func TestEnvironmentConfig_CDNEnabled(t *testing.T) {
	testCases := map[string]struct {
		in     EnvironmentConfig
		wanted bool
	}{
		"enabled via bool": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Enabled: aws.Bool(true),
				},
			},
			wanted: true,
		},
		// Advanced configuration implies the CDN is on without `cdn: true`.
		"enabled via config": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Config: AdvancedCDNConfig{
						Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
					},
				},
			},
			wanted: true,
		},
		"not enabled because empty": {
			in:     EnvironmentConfig{},
			wanted: false,
		},
		"not enabled via bool": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Enabled: aws.Bool(false),
				},
			},
			wanted: false,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got := tc.in.CDNEnabled()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestEnvironmentConfig_ELBAccessLogs verifies ELBAccessLogs' two return
// values: the advanced config (nil for the boolean shorthand) and the
// enabled flag (true for `true` or any advanced config, false for `false`).
func TestEnvironmentConfig_ELBAccessLogs(t *testing.T) {
	testCases := map[string]struct {
		in            EnvironmentConfig
		wantedFlag    bool
		wantedConfigs *ELBAccessLogsArgs
	}{
		"enabled via bool": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						ELBAccessLogs: ELBAccessLogsArgsOrBool{
							Enabled: aws.Bool(true),
						},
					},
				},
			},
			wantedFlag:    true,
			wantedConfigs: nil,
		},
		"disabled via bool": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						ELBAccessLogs: ELBAccessLogsArgsOrBool{
							Enabled: aws.Bool(false),
						},
					},
				},
			},
			wantedFlag:    false,
			wantedConfigs: nil,
		},
		"advanced access logs config": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						ELBAccessLogs: ELBAccessLogsArgsOrBool{
							AdvancedConfig: ELBAccessLogsArgs{
								Prefix:     aws.String("prefix"),
								BucketName: aws.String("bucketname"),
							},
						},
					},
				},
			},
			wantedFlag: true,
			wantedConfigs: &ELBAccessLogsArgs{
				BucketName: aws.String("bucketname"),
				Prefix:     aws.String("prefix"),
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			elbAccessLogs, flag := tc.in.ELBAccessLogs()
			require.Equal(t, tc.wantedFlag, flag)
			require.Equal(t, tc.wantedConfigs, elbAccessLogs)
		})
	}
}
| 1,177 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"fmt"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudfront"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/dustin/go-humanize/english"
"strconv"
"strings"
)
// ErrInvalidWorkloadType occurs when a user requested a manifest template type that doesn't exist.
type ErrInvalidWorkloadType struct {
	Type string
}

// Error implements the error interface.
func (e *ErrInvalidWorkloadType) Error() string {
	return "invalid manifest type: " + e.Type
}
// ErrInvalidPipelineManifestVersion occurs when the pipeline.yml/manifest.yml file
// contains invalid schema version during unmarshalling.
type ErrInvalidPipelineManifestVersion struct {
	invalidVersion PipelineSchemaMajorVersion
}

// Error implements the error interface.
func (e *ErrInvalidPipelineManifestVersion) Error() string {
	return fmt.Sprintf("pipeline manifest contains invalid schema version: %d", e.invalidVersion)
}

// Is compares the 2 errors. Only returns true if the errors are of the same
// type and contain the same information.
func (e *ErrInvalidPipelineManifestVersion) Is(target error) bool {
	other, ok := target.(*ErrInvalidPipelineManifestVersion)
	if !ok {
		return false
	}
	return other.invalidVersion == e.invalidVersion
}
// ErrUnknownProvider occurs when CreateProvider() is called with configurations
// that do not map to any supported provider.
type ErrUnknownProvider struct {
	unknownProviderProperties interface{}
}

// Error implements the error interface.
func (e *ErrUnknownProvider) Error() string {
	return fmt.Sprintf("no provider found for properties: %v", e.unknownProviderProperties)
}

// Is compares the 2 errors. Returns true if the errors are of the same type.
func (e *ErrUnknownProvider) Is(target error) bool {
	_, ok := target.(*ErrUnknownProvider)
	return ok
}
// errFieldMustBeSpecified is returned when a required manifest field is
// missing, optionally conditioned on other fields being present.
type errFieldMustBeSpecified struct {
	missingField       string
	conditionalFields  []string
	allMustBeSpecified bool
}

// Error implements the error interface.
func (e *errFieldMustBeSpecified) Error() string {
	base := fmt.Sprintf("%q must be specified", e.missingField)
	if len(e.conditionalFields) == 0 {
		return base
	}
	// "and" when every conditional field is required together, "or" otherwise.
	conj := "or"
	if e.allMustBeSpecified {
		conj = "and"
	}
	verb := english.PluralWord(len(e.conditionalFields), "is", "are")
	return fmt.Sprintf("%s if %s %s specified", base,
		english.WordSeries(quoteStringSlice(e.conditionalFields), conj), verb)
}
// errInvalidAutoscalingFieldsWithWkldType is returned when autoscaling fields
// are set on a workload type that does not support them.
type errInvalidAutoscalingFieldsWithWkldType struct {
	invalidFields []string
	workloadType  string
}

// Error implements the error interface.
func (e *errInvalidAutoscalingFieldsWithWkldType) Error() string {
	n := len(e.invalidFields)
	return fmt.Sprintf("autoscaling %v %v %v invalid with workload type %v",
		english.PluralWord(n, "field", "fields"),
		english.WordSeries(template.QuoteSliceFunc(e.invalidFields), "and"),
		english.PluralWord(n, "is", "are"),
		e.workloadType)
}
// errFieldMutualExclusive is returned when two manifest fields cannot be set
// together; mustExist additionally requires that one of them is present.
type errFieldMutualExclusive struct {
	firstField  string
	secondField string
	mustExist   bool
}

// Error implements the error interface.
func (e *errFieldMutualExclusive) Error() string {
	qualifier := "one, not both,"
	if e.mustExist {
		qualifier = "one"
	}
	return fmt.Sprintf(`must specify %s of "%s" and "%s"`, qualifier, e.firstField, e.secondField)
}
// errGracePeriodsInBothALBAndNLB is returned when "grace_period" is set under
// both "http" and "nlb".
type errGracePeriodsInBothALBAndNLB struct {
	errFieldMutualExclusive
}

// RecommendActions returns recommended actions to be taken after the error.
// Renamed from "RecommendedActions": the sibling error types in this file all
// implement RecommendActions(), so the previous spelling never satisfied the
// recommender interface and this hint was silently dropped.
func (e *errGracePeriodsInBothALBAndNLB) RecommendActions() string {
	return `"grace_period" is a configuration shared by "http" and "nlb". Specify it only once under either "http" or "nlb".`
}
// errGracePeriodSpecifiedInAdditionalListener is returned when "grace_period"
// appears under an additional NLB listener instead of the main one.
type errGracePeriodSpecifiedInAdditionalListener struct {
	index int
}

// Error implements the error interface.
func (e *errGracePeriodSpecifiedInAdditionalListener) Error() string {
	return `"grace_period" specified for "nlb.additional_listeners[` + strconv.Itoa(e.index) + `]"`
}

// RecommendActions returns recommended actions to be taken after the error.
func (e *errGracePeriodSpecifiedInAdditionalListener) RecommendActions() string {
	idx := strconv.Itoa(e.index)
	return `Instead of under "nlb.additional_listeners[` + idx + `].healthcheck", specify "grace_period" under the top-level "nlb.healthcheck".`
}
// errGracePeriodSpecifiedInAdditionalRule is returned when "grace_period"
// appears under an additional HTTP rule instead of the main one.
type errGracePeriodSpecifiedInAdditionalRule struct {
	index int
}

// Error implements the error interface.
func (e *errGracePeriodSpecifiedInAdditionalRule) Error() string {
	return `"grace_period" specified for "http.additional_rules[` + strconv.Itoa(e.index) + `]"`
}

// RecommendActions returns recommended actions to be taken after the error.
func (e *errGracePeriodSpecifiedInAdditionalRule) RecommendActions() string {
	idx := strconv.Itoa(e.index)
	return `Instead of under "http.additional_rules[` + idx + `].healthcheck", specify "grace_period" under the top-level "http.healthcheck".`
}
// errSpecifiedBothIngressFields is returned when ingress is configured under
// both the deprecated "security_groups.ingress" field and the new "ingress"
// field for the same (public or private) listener.
type errSpecifiedBothIngressFields struct {
	firstField  string
	secondField string
}

// Error implements the error interface.
func (e *errSpecifiedBothIngressFields) Error() string {
	return fmt.Sprintf(`must specify one, not both, of "%s" and "%s"`, e.firstField, e.secondField)
}

// RecommendActions returns recommended actions to be taken after the error.
// The first path segment of firstField ("public" or "private") selects which
// migration hint to show.
func (e *errSpecifiedBothIngressFields) RecommendActions() string {
	privateOrPublicField := strings.Split(e.firstField, ".")[0]
	if privateOrPublicField == "public" {
		return `
It looks like you specified ingress under both "http.public.security_groups.ingress" and "http.public.ingress".
After Copilot v1.23.0, we have deprecated "http.public.security_groups.ingress" in favor of "http.public.ingress".
This means that "http.public.security_groups.ingress.cdn" is removed in favor of "http.public.ingress.cdn".
With the new release manifest configuration for cdn looks like:

http:
  public:
    ingress:
      cdn: true
`
	}
	return `
It looks like you specified ingress under both "http.private.security_groups.ingress" and "http.private.ingress".
After Copilot v1.23.0, we have deprecated "http.private.security_groups.ingress" in favor of "http.private.ingress".
This means that "http.private.security_groups.ingress.from_vpc" is removed in favor of "http.private.ingress.vpc".
With the new release manifest configuration for vpc looks like:

http:
  private:
    ingress:
      vpc: true
`
}
// errRangeValueLessThanZero is returned when an autoscaling range contains a
// negative min, max, or spot_from value.
type errRangeValueLessThanZero struct {
	min      int
	max      int
	spotFrom int
}

// Error implements the error interface.
func (e *errRangeValueLessThanZero) Error() string {
	return "min value " + strconv.Itoa(e.min) +
		", max value " + strconv.Itoa(e.max) +
		", and spot_from value " + strconv.Itoa(e.spotFrom) +
		" must all be positive"
}
// errMinGreaterThanMax is returned when an autoscaling range has min > max.
type errMinGreaterThanMax struct {
	min int
	max int
}

// Error implements the error interface.
func (e *errMinGreaterThanMax) Error() string {
	return "min value " + strconv.Itoa(e.min) + " cannot be greater than max value " + strconv.Itoa(e.max)
}
// errAtLeastOneFieldMustBeSpecified is returned when none of a set of
// alternative fields is present, optionally conditioned on another field.
type errAtLeastOneFieldMustBeSpecified struct {
	missingFields    []string
	conditionalField string
}

// Error implements the error interface.
func (e *errAtLeastOneFieldMustBeSpecified) Error() string {
	msg := fmt.Sprintf("must specify at least one of %s",
		english.WordSeries(quoteStringSlice(e.missingFields), "or"))
	if e.conditionalField == "" {
		return msg
	}
	return fmt.Sprintf(`%s if "%s" is specified`, msg, e.conditionalField)
}
// errInvalidCloudFrontRegion is returned when a CDN certificate lives outside
// the single region CloudFront accepts (cloudfront.CertRegion).
type errInvalidCloudFrontRegion struct{}

// Error implements the error interface.
func (e *errInvalidCloudFrontRegion) Error() string {
	return fmt.Sprintf(`cdn certificate must be in region %s`, cloudfront.CertRegion)
}

// RecommendActions returns recommended actions to be taken after the error.
func (e *errInvalidCloudFrontRegion) RecommendActions() string {
	return fmt.Sprintf(`It looks like your CloudFront certificate is in the wrong region. CloudFront only supports certificates in %s.
We recommend creating a duplicate certificate in the %s region through AWS Certificate Manager.
More information: https://go.aws/3BMxY4J`, cloudfront.CertRegion, cloudfront.CertRegion)
}
// quoteStringSlice returns a copy of in with every element double-quoted via
// strconv.Quote; the input slice is left untouched.
func quoteStringSlice(in []string) []string {
	out := make([]string, 0, len(in))
	for _, s := range in {
		out = append(out, strconv.Quote(s))
	}
	return out
}
// errContainersExposingSamePort is returned when two sidecar/main containers
// declare the same exposed port.
type errContainersExposingSamePort struct {
	firstContainer  string
	secondContainer string
	port            uint16
}

// Error implements the error interface.
func (e *errContainersExposingSamePort) Error() string {
	return fmt.Sprintf("containers %q and %q are exposing the same port %d",
		e.firstContainer, e.secondContainer, e.port)
}
| 238 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"gopkg.in/yaml.v3"
)
// HTTPOrBool holds advanced configuration for routing rule or a boolean switch.
type HTTPOrBool struct {
	HTTP
	Enabled *bool
}

// Disabled returns true if the routing rule configuration is explicitly disabled.
func (r *HTTPOrBool) Disabled() bool {
	if r.Enabled == nil {
		return false
	}
	return !*r.Enabled
}
// UnmarshalYAML implements the yaml(v3) interface. It allows https routing rule to be specified as a
// bool or a struct alternately.
func (r *HTTPOrBool) UnmarshalYAML(value *yaml.Node) error {
	// First try the struct form; a yaml.TypeError just means the node is not
	// a map, so fall through to the boolean form below.
	if err := value.Decode(&r.HTTP); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if !r.HTTP.IsEmpty() {
		// Unmarshalled successfully to r.HTTP, unset r.Enabled, and return.
		r.Enabled = nil
		// this assignment lets us treat the main listener rule and additional listener rules equally
		// because we eliminate the need for TargetContainerCamelCase by assigning its value to TargetContainer.
		if r.TargetContainerCamelCase != nil && r.Main.TargetContainer == nil {
			r.Main.TargetContainer = r.TargetContainerCamelCase
			r.TargetContainerCamelCase = nil
		}
		return nil
	}
	// NOTE(review): message says "marshal" though this is an unmarshal path —
	// confirm intended wording before changing (callers/tests may assert it).
	if err := value.Decode(&r.Enabled); err != nil {
		return errors.New(`cannot marshal "http" field into bool or map`)
	}
	return nil
}
// HTTP holds options for application load balancer.
type HTTP struct {
	Main RoutingRule `yaml:",inline"`
	// Deprecated. Maintained for backwards compatibility, use [RoutingRule.TargetContainer] instead.
	TargetContainerCamelCase *string       `yaml:"targetContainer"`
	AdditionalRoutingRules   []RoutingRule `yaml:"additional_rules"`
}

// RoutingRules returns main as well as additional routing rules as a list of RoutingRule.
func (cfg HTTP) RoutingRules() []RoutingRule {
	if cfg.Main.IsEmpty() {
		return nil
	}
	rules := make([]RoutingRule, 0, len(cfg.AdditionalRoutingRules)+1)
	rules = append(rules, cfg.Main)
	return append(rules, cfg.AdditionalRoutingRules...)
}

// IsEmpty returns true if HTTP has empty configuration.
func (r *HTTP) IsEmpty() bool {
	return len(r.AdditionalRoutingRules) == 0 && r.TargetContainerCamelCase == nil && r.Main.IsEmpty()
}
// RoutingRule holds listener rule configuration for ALB.
type RoutingRule struct {
	Path                *string                 `yaml:"path"`
	ProtocolVersion     *string                 `yaml:"version"`
	HealthCheck         HealthCheckArgsOrString `yaml:"healthcheck"`
	Stickiness          *bool                   `yaml:"stickiness"`
	Alias               Alias                   `yaml:"alias"`
	DeregistrationDelay *time.Duration          `yaml:"deregistration_delay"`
	// TargetContainer is the container load balancer routes traffic to.
	TargetContainer  *string `yaml:"target_container"`
	TargetPort       *uint16 `yaml:"target_port"`
	AllowedSourceIps []IPNet `yaml:"allowed_source_ips"`
	HostedZone       *string `yaml:"hosted_zone"`
	// RedirectToHTTPS configures a HTTP->HTTPS redirect. If nil, default to true.
	RedirectToHTTPS *bool `yaml:"redirect_to_https"`
}

// IsEmpty returns true if RoutingRule has empty configuration.
func (r *RoutingRule) IsEmpty() bool {
	switch {
	case r.Path != nil, r.ProtocolVersion != nil, r.Stickiness != nil,
		r.DeregistrationDelay != nil, r.TargetContainer != nil, r.TargetPort != nil,
		r.AllowedSourceIps != nil, r.HostedZone != nil, r.RedirectToHTTPS != nil,
		!r.HealthCheck.IsZero(), !r.Alias.IsEmpty():
		return false
	default:
		return true
	}
}
// IPNet represents an IP network string. For example: 10.1.0.0/16
type IPNet string

// ipNetP converts s into an *IPNet, mapping the empty string to nil.
func ipNetP(s string) *IPNet {
	if s == "" {
		return nil
	}
	v := IPNet(s)
	return &v
}
// AdvancedAlias represents advanced alias configuration.
type AdvancedAlias struct {
	Alias      *string `yaml:"name"`        // The domain alias itself.
	HostedZone *string `yaml:"hosted_zone"` // Optional Route 53 hosted zone for the alias.
}

// Alias is a custom type which supports unmarshaling "http.alias" yaml which
// can either be of type advancedAlias slice or type StringSliceOrString.
// Exactly one of the two representations is populated after unmarshaling.
type Alias struct {
	AdvancedAliases     []AdvancedAlias
	StringSliceOrString StringSliceOrString
}
// HostedZones returns all the hosted zones.
func (a *Alias) HostedZones() []string {
	var zones []string
	for i := range a.AdvancedAliases {
		if hz := a.AdvancedAliases[i].HostedZone; hz != nil {
			zones = append(zones, *hz)
		}
	}
	return zones
}

// IsEmpty returns empty if Alias is empty.
func (a *Alias) IsEmpty() bool {
	if len(a.AdvancedAliases) > 0 {
		return false
	}
	return a.StringSliceOrString.isEmpty()
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the Alias
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (a *Alias) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced (slice-of-structs) form first; a yaml.TypeError just
	// means the node has a different shape, so fall through.
	if err := value.Decode(&a.AdvancedAliases); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if len(a.AdvancedAliases) != 0 {
		// Unmarshaled successfully to a.AdvancedAliases; unset
		// a.StringSliceOrString, and return.
		a.StringSliceOrString = StringSliceOrString{}
		return nil
	}
	if err := a.StringSliceOrString.UnmarshalYAML(value); err != nil {
		return errUnmarshalAlias
	}
	return nil
}
// ToStringSlice converts an Alias to a slice of string.
func (a *Alias) ToStringSlice() ([]string, error) {
	if len(a.AdvancedAliases) == 0 {
		return a.StringSliceOrString.ToStringSlice(), nil
	}
	out := make([]string, 0, len(a.AdvancedAliases))
	for _, adv := range a.AdvancedAliases {
		out = append(out, aws.StringValue(adv.Alias))
	}
	return out, nil
}
// ToString converts an Alias to a string.
func (a *Alias) ToString() string {
	if n := len(a.AdvancedAliases); n != 0 {
		names := make([]string, n)
		for i, adv := range a.AdvancedAliases {
			names[i] = aws.StringValue(adv.Alias)
		}
		return strings.Join(names, ",")
	}
	if s := a.StringSliceOrString.String; s != nil {
		return *s
	}
	return strings.Join(a.StringSliceOrString.StringSlice, ",")
}
| 190 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestHTTPOrBool_Disabled verifies that Disabled is true only for an explicit
// `http: false`; nil or advanced configuration counts as enabled.
func TestHTTPOrBool_Disabled(t *testing.T) {
	testCases := map[string]struct {
		in     HTTPOrBool
		wanted bool
	}{
		"disabled": {
			in: HTTPOrBool{
				Enabled: aws.Bool(false),
			},
			wanted: true,
		},
		"enabled implicitly": {
			in: HTTPOrBool{},
		},
		"enabled explicitly": {
			in: HTTPOrBool{
				Enabled: aws.Bool(true),
			},
		},
		"enabled explicitly by advanced configuration": {
			in: HTTPOrBool{
				HTTP: HTTP{
					Main: RoutingRule{
						Path: aws.String("mockPath"),
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.in.Disabled()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestAlias_HostedZones verifies that HostedZones collects every non-nil
// hosted zone from the advanced aliases.
func TestAlias_HostedZones(t *testing.T) {
	testCases := map[string]struct {
		in     Alias
		wanted []string
	}{
		"no hosted zone": {
			in: Alias{
				AdvancedAliases: []AdvancedAlias{},
			},
			wanted: []string{},
		},
		"with hosted zones": {
			in: Alias{
				AdvancedAliases: []AdvancedAlias{
					{
						HostedZone: aws.String("mockHostedZone1"),
					},
					{
						HostedZone: aws.String("mockHostedZone2"),
					},
				},
			},
			wanted: []string{"mockHostedZone1", "mockHostedZone2"},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.in.HostedZones()

			// THEN
			require.ElementsMatch(t, tc.wanted, got)
		})
	}
}
// TestAlias_UnmarshalYAML verifies the three accepted shapes of "http.alias"
// (string, string slice, advanced slice) and that an unparseable node yields
// errUnmarshalAlias. The HTTP wrapper pre-populates a value to prove the
// decoder resets whichever representation loses.
func TestAlias_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct Alias
		wantedError  error
	}{
		"Alias specified in string": {
			inContent: []byte(`alias: foobar.com`),
			wantedStruct: Alias{
				StringSliceOrString: StringSliceOrString{
					String: aws.String("foobar.com"),
				},
			},
		},
		"Alias specified in slice of strings": {
			inContent: []byte(`alias:
- example.com
- v1.example.com`),
			wantedStruct: Alias{
				StringSliceOrString: StringSliceOrString{
					StringSlice: []string{"example.com", "v1.example.com"},
				},
				AdvancedAliases: []AdvancedAlias{},
			},
		},
		"Alias specified in advanced alias slice": {
			inContent: []byte(`alias:
- name: example.com
  hosted_zone: Z0873220N255IR3MTNR4
- name: foobar.com`),
			wantedStruct: Alias{
				AdvancedAliases: []AdvancedAlias{
					{
						Alias:      aws.String("example.com"),
						HostedZone: aws.String("Z0873220N255IR3MTNR4"),
					},
					{
						Alias: aws.String("foobar.com"),
					},
				},
			},
		},
		"Error if unmarshalable": {
			inContent: []byte(`alias:
  foo: bar`),
			wantedStruct: Alias{
				StringSliceOrString: StringSliceOrString{},
				AdvancedAliases:     []AdvancedAlias{},
			},
			wantedError: errUnmarshalAlias,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			r := HTTP{
				Main: RoutingRule{
					Alias: Alias{
						StringSliceOrString: StringSliceOrString{
							String: aws.String("wrong"),
						},
					},
				},
			}
			err := yaml.Unmarshal(tc.inContent, &r)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				// check memberwise dereferenced pointer equality
				require.Equal(t, tc.wantedStruct.StringSliceOrString, r.Main.Alias.StringSliceOrString)
				require.Equal(t, tc.wantedStruct.AdvancedAliases, r.Main.Alias.AdvancedAliases)
			}
		})
	}
}
// TestAlias_IsEmpty verifies Alias.IsEmpty reports true only when neither
// representation carries a value.
func TestAlias_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     Alias
		wanted bool
	}{
		"empty alias": {
			in:     Alias{},
			wanted: true,
		},
		"non empty alias": {
			in: Alias{
				StringSliceOrString: StringSliceOrString{
					String: aws.String("alias test"),
				},
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestAlias_ToString verifies that ToString renders a comma-joined string for
// each of the three alias representations.
func TestAlias_ToString(t *testing.T) {
	testCases := map[string]struct {
		inAlias Alias
		wanted  string
	}{
		"alias using string": {
			inAlias: Alias{
				StringSliceOrString: StringSliceOrString{
					String: stringP("example.com"),
				},
			},
			wanted: "example.com",
		},
		"alias using string slice": {
			inAlias: Alias{
				StringSliceOrString: StringSliceOrString{
					StringSlice: []string{"example.com", "v1.example.com"},
				},
			},
			wanted: "example.com,v1.example.com",
		},
		"alias using advanced alias slice": {
			inAlias: Alias{
				AdvancedAliases: []AdvancedAlias{
					{
						Alias: aws.String("example.com"),
					},
					{
						Alias: aws.String("v1.example.com"),
					},
				},
			},
			wanted: "example.com,v1.example.com",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.inAlias.ToString()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestAlias_ToStringSlice verifies that ToStringSlice yields one string per
// alias for each of the three alias representations.
func TestAlias_ToStringSlice(t *testing.T) {
	testCases := map[string]struct {
		inAlias Alias
		wanted  []string
	}{
		"alias using string": {
			inAlias: Alias{
				StringSliceOrString: StringSliceOrString{
					String: stringP("example.com"),
				},
			},
			wanted: []string{"example.com"},
		},
		"alias using string slice": {
			inAlias: Alias{
				StringSliceOrString: StringSliceOrString{
					StringSlice: []string{"example.com", "v1.example.com"},
				},
			},
			wanted: []string{"example.com", "v1.example.com"},
		},
		"alias using advanced alias slice": {
			inAlias: Alias{
				AdvancedAliases: []AdvancedAlias{
					{
						Alias: aws.String("example.com"),
					},
					{
						Alias: aws.String("v1.example.com"),
					},
				},
			},
			wanted: []string{"example.com", "v1.example.com"},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got, _ := tc.inAlias.ToStringSlice()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}
| 290 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"gopkg.in/yaml.v3"
)
const (
	// Variable names Copilot injects itself; they cannot be overridden by the
	// OS environment (see Interpolator.interpolatePart).
	reservedEnvVarKeyForAppName = "COPILOT_APPLICATION_NAME"
	reservedEnvVarKeyForEnvName = "COPILOT_ENVIRONMENT_NAME"
)

var (
	// Taken from docker/compose.
	// Environment variable names consist solely of uppercase letters, digits, and underscore,
	// and do not begin with a digit. (https://pubs.opengroup.org/onlinepubs/007904875/basedefs/xbd_chap08.html)
	interpolatorEnvVarRegExp = regexp.MustCompile(`\${([_a-zA-Z][_a-zA-Z0-9]*)}`)
)
// Interpolator substitutes variables in a manifest.
type Interpolator struct {
	predefinedEnvVars map[string]string
}

// NewInterpolator initiates a new Interpolator.
func NewInterpolator(appName, envName string) *Interpolator {
	predefined := map[string]string{
		reservedEnvVarKeyForAppName: appName,
		reservedEnvVarKeyForEnvName: envName,
	}
	return &Interpolator{predefinedEnvVars: predefined}
}
// Interpolate substitutes environment variables in a string.
// The input is parsed as YAML, every scalar has its ${VAR} placeholders
// replaced, and the tree is re-rendered with 2-space indentation.
func (i *Interpolator) Interpolate(s string) (string, error) {
	root, err := unmarshalYAML([]byte(s))
	if err != nil {
		return "", err
	}
	if err := i.applyInterpolation(root); err != nil {
		return "", err
	}
	rendered, err := marshalYAML(root)
	if err != nil {
		return "", err
	}
	return string(rendered), nil
}
// applyInterpolation walks the YAML node tree in place, substituting ${VAR}
// placeholders in every scalar. If an interpolated scalar parses as a JSON
// array of strings, the scalar node is expanded into a YAML sequence node.
func (i *Interpolator) applyInterpolation(node *yaml.Node) error {
	switch node.Tag {
	case "!!map":
		// The content of a map always come in pairs. If the node pair exists, return the map node.
		// Note that the rest of code massively uses yaml node tree.
		// Please refer to https://www.efekarakus.com/2020/05/30/deep-dive-go-yaml-cfn.html
		for idx := 0; idx < len(node.Content); idx += 2 {
			// Only values (odd indices) are interpolated; keys are left as-is.
			if err := i.applyInterpolation(node.Content[idx+1]); err != nil {
				return err
			}
		}
	case "!!str":
		interpolated, err := i.interpolatePart(node.Value)
		if err != nil {
			return err
		}
		// If the substituted value is a JSON string array (e.g. an env var
		// holding `["a","b"]`), rewrite this scalar as a sequence node.
		var s []string
		if err = json.Unmarshal([]byte(interpolated), &s); err == nil && len(s) != 0 {
			seqNode := &yaml.Node{
				Kind: yaml.SequenceNode,
			}
			for _, value := range s {
				seqNode.Content = append(seqNode.Content, &yaml.Node{
					Kind:  yaml.ScalarNode,
					Value: value,
				})
			}
			*node = *seqNode
		} else {
			node.Value = interpolated
		}
	default:
		// Sequences, documents, etc.: recurse into every child node.
		for _, content := range node.Content {
			if err := i.applyInterpolation(content); err != nil {
				return err
			}
		}
	}
	return nil
}
// interpolatePart replaces every ${VAR} placeholder in s. Copilot-predefined
// variables win over the OS environment; if both define the same name with
// different values, or a referenced name is defined nowhere, an error is
// returned.
func (i *Interpolator) interpolatePart(s string) (string, error) {
	matches := interpolatorEnvVarRegExp.FindAllStringSubmatch(s, -1)
	if len(matches) == 0 {
		return s, nil
	}
	replaced := s
	for _, match := range matches {
		// https://pkg.go.dev/regexp#Regexp.FindAllStringSubmatch
		name := match[1]
		placeholder := fmt.Sprintf("${%s}", name)
		predefined, hasPredefined := i.predefinedEnvVars[name]
		fromOS, hasOS := os.LookupEnv(name)
		switch {
		case hasPredefined && hasOS && predefined != fromOS:
			return "", fmt.Errorf(`predefined environment variable "%s" cannot be overridden by OS environment variable with the same name`, name)
		case hasPredefined:
			replaced = strings.ReplaceAll(replaced, placeholder, predefined)
		case hasOS:
			replaced = strings.ReplaceAll(replaced, placeholder, fromOS)
		default:
			return "", fmt.Errorf(`environment variable "%s" is not defined`, name)
		}
	}
	return replaced, nil
}
// unmarshalYAML parses the given manifest bytes into a YAML document node.
func unmarshalYAML(temp []byte) (*yaml.Node, error) {
	node := new(yaml.Node)
	if err := yaml.Unmarshal(temp, node); err != nil {
		return nil, fmt.Errorf("unmarshal YAML template: %w", err)
	}
	return node, nil
}
// marshalYAML serializes a YAML document node back into bytes, indenting
// nested fields with two spaces.
func marshalYAML(content *yaml.Node) ([]byte, error) {
	var out bytes.Buffer
	enc := yaml.NewEncoder(&out)
	enc.SetIndent(2)
	if err := enc.Encode(content); err != nil {
		return nil, fmt.Errorf("marshal YAML template: %w", err)
	}
	// Close the encoder so any remaining buffered data is written to out;
	// skipping Close can leave the serialized document incomplete.
	if err := enc.Close(); err != nil {
		return nil, fmt.Errorf("marshal YAML template: %w", err)
	}
	return out.Bytes(), nil
}
| 146 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/require"
)
// TestInterpolator_Interpolate exercises ${VAR} substitution in raw manifest
// text: undefined variables, attempts to override Copilot-predefined
// variables, inputs with no references, and JSON string arrays that expand
// into YAML sequences.
func TestInterpolator_Interpolate(t *testing.T) {
	testCases := map[string]struct {
		inputEnvVar map[string]string // OS environment variables set for the duration of the case.
		inputStr    string            // Raw manifest content fed to Interpolate.
		wanted      string            // Expected interpolated YAML output.
		wantedErr   error             // Expected error, if any.
	}{
		"should return error if env var is not defined": {
			inputStr:  "/copilot/my-app/${env}/secrets/db_password",
			wantedErr: fmt.Errorf(`environment variable "env" is not defined`),
		},
		"should return error if trying to override predefined env var": {
			inputStr: "/copilot/my-app/${COPILOT_ENVIRONMENT_NAME}/secrets/db_password",
			inputEnvVar: map[string]string{
				"COPILOT_ENVIRONMENT_NAME": "prod",
			},
			wantedErr: fmt.Errorf(`predefined environment variable "COPILOT_ENVIRONMENT_NAME" cannot be overridden by OS environment variable with the same name`),
		},
		"success with no matches": {
			inputStr: "1234567890.dkr.ecr.us-west-2.amazonaws.com/vault/test:latest",
			wanted:   "1234567890.dkr.ecr.us-west-2.amazonaws.com/vault/test:latest\n",
		},
		"success": {
			inputStr: `# The manifest for the ${name} service.
# Your service name will be used in naming your resources like log groups, ECS services, etc.
name: loadtester
type: Backend Service
# Your service is reachable at "http://loadtester.${COPILOT_SERVICE_DISCOVERY_ENDPOINT}:80" but is not public.
http:
allowed_source_ips:
- ${ip}
# Configuration for your containers and service.
image:
# Docker build arguments. For additional overrides: https://aws.github.io/copilot-cli/docs/manifest/backend-service/#image-build
location: ${0accountID}.dkr.${repo-provider}.${region}.amazonaws.com/vault/${COPILOT_ENVIRONMENT_NAME}:${tag}
port: 80
cpu: 256#${CPU}
memory: 512 # ${Memory}
variables:
${foo}: ${bar}
network:
vpc:
security_groups: ${SECURITY_GROUPS}
`,
			inputEnvVar: map[string]string{
				"0accountID":               "1234567890",
				"repo-provider":            "ecr",
				"tag":                      "latest",
				"COPILOT_APPLICATION_NAME": "myApp",
				"region":                   "",
				"CPU":                      "512",
				"bar":                      "bar",
				"ip":                       "10.24.34.0/23",
				"SECURITY_GROUPS":          `["sg-1","sg-2","sg-3"]`,
			},
			// Note: references inside comments are interpolated too (${CPU}),
			// keys are never interpolated (${foo}), and the JSON array value of
			// SECURITY_GROUPS expands into a YAML sequence.
			wanted: `# The manifest for the ${name} service.
# Your service name will be used in naming your resources like log groups, ECS services, etc.
name: loadtester
type: Backend Service
# Your service is reachable at "http://loadtester.${COPILOT_SERVICE_DISCOVERY_ENDPOINT}:80" but is not public.
http:
allowed_source_ips:
- 10.24.34.0/23
# Configuration for your containers and service.
image:
# Docker build arguments. For additional overrides: https://aws.github.io/copilot-cli/docs/manifest/backend-service/#image-build
location: ${0accountID}.dkr.${repo-provider}..amazonaws.com/vault/test:latest
port: 80
cpu: 256#512
memory: 512 # ${Memory}
variables:
${foo}: bar
network:
vpc:
security_groups:
- sg-1
- sg-2
- sg-3
`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			itpl := NewInterpolator(
				"myApp",
				"test",
			)
			// Set the OS environment for this case; the deferred Unsetenv
			// restores it when the subtest finishes.
			for k, v := range tc.inputEnvVar {
				require.NoError(t, os.Setenv(k, v))
				defer func(key string) {
					require.NoError(t, os.Unsetenv(key))
				}(k)
			}
			actual, actualErr := itpl.Interpolate(tc.inputStr)

			// THEN
			if tc.wantedErr != nil {
				require.EqualError(t, actualErr, tc.wantedErr.Error())
			} else {
				require.NoError(t, actualErr)
				require.Equal(t, tc.wanted, actual)
			}
		})
	}
}
| 131 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
)
const (
	// scheduledJobManifestPath is the path to the template used to render a scheduled job manifest.
	scheduledJobManifestPath = "workloads/jobs/scheduled-job/manifest.yml"
)

// ScheduledJob holds the configuration to build a container image that is run
// periodically in a given environment with timeout and retry logic.
type ScheduledJob struct {
	Workload           `yaml:",inline"`
	ScheduledJobConfig `yaml:",inline"`
	// Environments holds per-environment overrides that are merged on top of
	// the base ScheduledJobConfig by applyEnv.
	Environments map[string]*ScheduledJobConfig `yaml:",flow"`

	// parser renders the manifest template; it is not part of the YAML document.
	parser template.Parser
}
// subnets returns a pointer to the job's subnet placement configuration.
// Receiver renamed from s to j for consistency with the other ScheduledJob
// methods (MarshalBinary, applyEnv, Publish, BuildArgs, ...).
func (j *ScheduledJob) subnets() *SubnetListOrArgs {
	return &j.Network.VPC.Placement.Subnets
}
// ScheduledJobConfig holds the configuration for a scheduled job
type ScheduledJobConfig struct {
	ImageConfig             ImageWithHealthcheck `yaml:"image,flow"`
	ImageOverride           `yaml:",inline"`
	TaskConfig              `yaml:",inline"`
	Logging                 Logging                   `yaml:"logging,flow"`
	Sidecars                map[string]*SidecarConfig `yaml:"sidecars"` // NOTE: keep the pointers because `mergo` doesn't automatically deep merge map's value unless it's a pointer type.
	On                      JobTriggerConfig          `yaml:"on,flow"`
	JobFailureHandlerConfig `yaml:",inline"`
	Network                 NetworkConfig  `yaml:"network"`
	PublishConfig           PublishConfig  `yaml:"publish"`
	TaskDefOverrides        []OverrideRule `yaml:"taskdef_overrides"`
}

// JobTriggerConfig represents the configuration for the event that triggers the job.
type JobTriggerConfig struct {
	// Schedule is the expression that determines when the job runs (e.g. "@hourly").
	Schedule *string `yaml:"schedule"`
}

// JobFailureHandlerConfig represents the error handling configuration for the job.
type JobFailureHandlerConfig struct {
	Timeout *string `yaml:"timeout"` // Duration string, e.g. "5m".
	Retries *int    `yaml:"retries"` // Number of retries on failure.
}

// ScheduledJobProps contains properties for creating a new scheduled job manifest.
type ScheduledJobProps struct {
	*WorkloadProps
	Schedule    string               // Schedule expression for the job trigger.
	Timeout     string               // Optional timeout configuration.
	HealthCheck ContainerHealthCheck // Optional healthcheck configuration.
	Platform    PlatformArgsOrString // Optional platform configuration.
	Retries     int                  // Optional retry count; 0 leaves retries unset in the manifest.
}
// NewScheduledJob creates a new scheduled job object with the given properties
// layered on top of the defaults.
func NewScheduledJob(props *ScheduledJobProps) *ScheduledJob {
	mft := newDefaultScheduledJob()
	// Apply the user-provided overrides.
	mft.Name = stringP(props.Name)
	mft.ImageConfig.Image.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
	mft.ImageConfig.Image.Location = stringP(props.Image)
	mft.ImageConfig.HealthCheck = props.HealthCheck
	mft.Platform = props.Platform
	if isWindowsPlatform(props.Platform) {
		// Windows tasks require larger minimum CPU and memory reservations.
		mft.TaskConfig.CPU = aws.Int(MinWindowsTaskCPU)
		mft.TaskConfig.Memory = aws.Int(MinWindowsTaskMemory)
	}
	mft.On.Schedule = stringP(props.Schedule)
	if props.Retries != 0 {
		mft.Retries = aws.Int(props.Retries)
	}
	mft.Timeout = stringP(props.Timeout)
	for _, envName := range props.PrivateOnlyEnvironments {
		// Environments without public subnets must place tasks privately.
		mft.Environments[envName] = &ScheduledJobConfig{
			Network: NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: placementStringP(PrivateSubnetPlacement),
					},
				},
			},
		}
	}
	mft.parser = template.New()
	return mft
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (j *ScheduledJob) MarshalBinary() ([]byte, error) {
	buf, err := j.parser.Parse(scheduledJobManifestPath, *j)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// applyEnv returns a copy of the job with the configuration for envName merged
// on top of the base configuration. The value receiver is deliberate: mergo
// mutates the copy, leaving the original manifest untouched.
func (j ScheduledJob) applyEnv(envName string) (workloadManifest, error) {
	overrideConfig, ok := j.Environments[envName]
	if !ok {
		return &j, nil
	}
	if overrideConfig == nil {
		// Guard against an environment key with an empty body (e.g. "prod:"),
		// which unmarshals to a nil pointer; mirrors LoadBalancedWebService.applyEnv.
		return &j, nil
	}
	// Apply overrides to the original job.
	for _, t := range defaultTransformers {
		err := mergo.Merge(&j, ScheduledJob{
			ScheduledJobConfig: *overrideConfig,
		}, mergo.WithOverride, mergo.WithTransformers(t))
		if err != nil {
			return nil, err
		}
	}
	j.Environments = nil
	return &j, nil
}
// requiredEnvironmentFeatures returns the environment features (e.g. NAT, EFS)
// that this manifest depends on. Receiver renamed from s to j for consistency
// with the other ScheduledJob methods.
func (j *ScheduledJob) requiredEnvironmentFeatures() []string {
	var features []string
	features = append(features, j.Network.requiredEnvFeatures()...)
	features = append(features, j.Storage.requiredEnvFeatures()...)
	return features
}
// Publish returns the list of topics where notifications can be published.
func (j *ScheduledJob) Publish() []Topic {
	cfg := j.ScheduledJobConfig.PublishConfig
	return cfg.publishedTopics()
}
// BuildArgs returns a docker.BuildArguments object for the job given a context directory.
func (j *ScheduledJob) BuildArgs(contextDir string) (map[string]*DockerBuildArgs, error) {
	needsBuild, err := requiresBuild(j.ImageConfig.Image)
	if err != nil {
		return nil, err
	}
	// Collects build args for the main container plus every sidecar image.
	perContainer := make(map[string]*DockerBuildArgs, len(j.Sidecars)+1)
	if needsBuild {
		perContainer[aws.StringValue(j.Name)] = j.ImageConfig.Image.BuildConfig(contextDir)
	}
	return buildArgs(contextDir, perContainer, j.Sidecars)
}
// EnvFiles returns the locations of all env files against the ws root directory.
// The returned map's keys are container names and its values are env file
// paths, or empty strings for containers without one.
func (j *ScheduledJob) EnvFiles() map[string]string {
	taskCfg := j.TaskConfig
	return envFiles(j.Name, taskCfg, j.Logging, j.Sidecars)
}
// newDefaultScheduledJob returns an empty ScheduledJob with only the default values set.
func newDefaultScheduledJob() *ScheduledJob {
	defaultCount := Count{
		Value: aws.Int(1),
		AdvancedCount: AdvancedCount{ // Leave advanced count empty while passing down the type of the workload.
			workloadType: manifestinfo.ScheduledJobType,
		},
	}
	defaultNetwork := NetworkConfig{
		VPC: vpcConfig{
			Placement: PlacementArgOrString{
				PlacementString: placementStringP(PublicSubnetPlacement),
			},
		},
	}
	return &ScheduledJob{
		Workload: Workload{
			Type: aws.String(manifestinfo.ScheduledJobType),
		},
		ScheduledJobConfig: ScheduledJobConfig{
			ImageConfig: ImageWithHealthcheck{},
			TaskConfig: TaskConfig{
				CPU:    aws.Int(256),
				Memory: aws.Int(512),
				Count:  defaultCount,
			},
			Network: defaultNetwork,
		},
		Environments: map[string]*ScheduledJobConfig{},
	}
}
// ExposedPorts returns all the ports that are sidecar container ports available to receive traffic.
func (j *ScheduledJob) ExposedPorts() (ExposedPortsIndex, error) {
	var ports []ExposedPort
	for name, sidecar := range j.Sidecars {
		sidecarPorts, err := sidecar.exposedPorts(name)
		if err != nil {
			return ExposedPortsIndex{}, err
		}
		ports = append(ports, sidecarPorts...)
	}
	portsForContainer, containerForPort := prepareParsedExposedPortsMap(sortExposedPorts(ports))
	return ExposedPortsIndex{
		PortsForContainer: portsForContainer,
		ContainerForPort:  containerForPort,
	}, nil
}
| 206 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/stretchr/testify/require"
)
// TestScheduledJob_ApplyEnv verifies the per-environment override merge:
// missing environments are no-ops, defaults are preserved when only some
// fields are overridden, and image "build" and "location" settings replace
// each other in both directions.
func TestScheduledJob_ApplyEnv(t *testing.T) {
	testCases := map[string]struct {
		inputManifest  *ScheduledJob // Manifest before the merge.
		inputEnv       string        // Environment name passed to applyEnv.
		wantedManifest *ScheduledJob // Expected manifest after the merge.
		wantedErr      error         // Expected error, if any.
	}{
		"should return the same scheduled job if the environment does not exist": {
			inputManifest:  newDefaultScheduledJob(),
			inputEnv:       "test",
			wantedManifest: newDefaultScheduledJob(),
		},
		"should preserve defaults and only override fields under 'environment'": {
			inputManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("report-generator"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("nginx"),
							},
						},
					},
					On: JobTriggerConfig{
						Schedule: aws.String("@hourly"),
					},
					JobFailureHandlerConfig: JobFailureHandlerConfig{
						Timeout: aws.String("5m"),
						Retries: aws.Int(1),
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
				Environments: map[string]*ScheduledJobConfig{
					"prod": {
						TaskConfig: TaskConfig{
							Variables: map[string]Variable{
								"LOG_LEVEL": {
									stringOrFromCFN{
										Plain: stringP("prod"),
									},
								},
							},
						},
					},
				},
			},
			inputEnv: "prod",
			wantedManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("report-generator"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("nginx"),
							},
						},
					},
					On: JobTriggerConfig{
						Schedule: aws.String("@hourly"),
					},
					JobFailureHandlerConfig: JobFailureHandlerConfig{
						Timeout: aws.String("5m"),
						Retries: aws.Int(1),
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
						Variables: map[string]Variable{
							"LOG_LEVEL": {
								stringOrFromCFN{
									Plain: stringP("prod"),
								},
							},
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
				// applyEnv clears the Environments map on the returned copy.
				Environments: nil,
			},
		},
		"with image build overridden by image location": {
			inputManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./Dockerfile"),
									},
								},
							},
						},
					},
				},
				Environments: map[string]*ScheduledJobConfig{
					"prod-iad": {
						ImageConfig: ImageWithHealthcheck{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("env-override location"),
								},
							},
						},
					},
				},
			},
			inputEnv: "prod-iad",
			wantedManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("env-override location"),
							},
						},
					},
				},
			},
		},
		"with image location overridden by image location": {
			inputManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("default location"),
							},
						},
					},
				},
				Environments: map[string]*ScheduledJobConfig{
					"prod-iad": {
						ImageConfig: ImageWithHealthcheck{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("env-override location"),
								},
							},
						},
					},
				},
			},
			inputEnv: "prod-iad",
			wantedManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("env-override location"),
							},
						},
					},
				},
			},
		},
		"with image build overridden by image build": {
			inputManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./Dockerfile"),
									},
								},
							},
						},
					},
				},
				Environments: map[string]*ScheduledJobConfig{
					"prod-iad": {
						ImageConfig: ImageWithHealthcheck{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("overridden build string"),
									},
								},
							},
						},
					},
				},
			},
			inputEnv: "prod-iad",
			wantedManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildString: aws.String("overridden build string"),
								},
							},
						},
					},
				},
			},
		},
		"with image location overridden by image build": {
			inputManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("default location"),
							},
						},
					},
				},
				Environments: map[string]*ScheduledJobConfig{
					"prod-iad": {
						ImageConfig: ImageWithHealthcheck{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("overridden build string"),
									},
								},
							},
						},
					},
				},
			},
			inputEnv: "prod-iad",
			wantedManifest: &ScheduledJob{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildString: aws.String("overridden build string"),
								},
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			actualManifest, actualErr := tc.inputManifest.applyEnv(tc.inputEnv)

			// THEN
			if tc.wantedErr != nil {
				require.EqualError(t, actualErr, tc.wantedErr.Error())
			} else {
				require.NoError(t, actualErr)
				require.Equal(t, tc.wantedManifest, actualManifest)
			}
		})
	}
}
// TestScheduledJob_RequiredEnvironmentFeatures verifies which environment
// features (NAT, EFS) a job manifest reports as required based on its network
// placement and storage configuration.
func TestScheduledJob_RequiredEnvironmentFeatures(t *testing.T) {
	testCases := map[string]struct {
		mft    func(svc *ScheduledJob) // Mutates a base manifest for the case.
		wanted []string                // Expected feature list (nil means none).
	}{
		"no feature required by default": {
			mft: func(svc *ScheduledJob) {},
		},
		"nat feature required": {
			mft: func(svc *ScheduledJob) {
				svc.Network = NetworkConfig{
					VPC: vpcConfig{
						Placement: PlacementArgOrString{
							PlacementString: placementStringP(PrivateSubnetPlacement),
						},
					},
				}
			},
			wanted: []string{template.NATFeatureName},
		},
		"efs feature required by enabling managed volume": {
			mft: func(svc *ScheduledJob) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-managed-volume-1": {
							EFS: EFSConfigOrBool{
								Enabled: aws.Bool(true),
							},
						},
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
			wanted: []string{template.EFSFeatureName},
		},
		"efs feature not required because storage is imported": {
			mft: func(svc *ScheduledJob) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			inSvc := ScheduledJob{
				Workload: Workload{
					Name: aws.String("mock-svc"),
					Type: aws.String(manifestinfo.ScheduledJobType),
				},
			}
			tc.mft(&inSvc)
			got := inSvc.requiredEnvironmentFeatures()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestScheduledJob_Publish verifies that Publish surfaces the topics declared
// under the manifest's publish configuration, and returns nil when none exist.
func TestScheduledJob_Publish(t *testing.T) {
	testCases := map[string]struct {
		mft          *ScheduledJob
		wantedTopics []Topic
	}{
		"returns nil if there are no topics set": {
			mft: &ScheduledJob{},
		},
		"returns the list of topics if manifest publishes notifications": {
			mft: &ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{
								Name: stringP("hello"),
							},
						},
					},
				},
			},
			wantedTopics: []Topic{
				{
					Name: stringP("hello"),
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			actual := tc.mft.Publish()

			// THEN
			require.Equal(t, tc.wantedTopics, actual)
		})
	}
}
| 451 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"strconv"
"time"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
"github.com/aws/aws-sdk-go/aws"
"github.com/imdario/mergo"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
)
const (
	// lbWebSvcManifestPath is the path to the template used to render a load balanced web service manifest.
	lbWebSvcManifestPath = "workloads/services/lb-web/manifest.yml"
)

// Default values for HTTPHealthCheck for a load balanced web service.
const (
	DefaultHealthCheckPath        = "/"
	DefaultHealthCheckAdminPath   = "admin"
	DefaultHealthCheckGracePeriod = 60
	DefaultDeregistrationDelay    = 60
)

const (
	GRPCProtocol = "gRPC" // GRPCProtocol is the HTTP protocol version for gRPC.

	// commonGRPCPort is the port conventionally used by gRPC servers; seeing it
	// makes NewLoadBalancedWebService default the protocol version to gRPC.
	commonGRPCPort = uint16(50051)
)
// durationp is a utility function used to convert a time.Duration to a pointer. Useful for YAML unmarshaling
// and template execution.
func durationp(v time.Duration) *time.Duration {
return &v
}
// LoadBalancedWebService holds the configuration to build a container image with an exposed port that receives
// requests through a load balancer with AWS Fargate as the compute engine.
type LoadBalancedWebService struct {
	Workload                     `yaml:",inline"`
	LoadBalancedWebServiceConfig `yaml:",inline"`
	// Use *LoadBalancedWebServiceConfig because of https://github.com/imdario/mergo/issues/146
	Environments map[string]*LoadBalancedWebServiceConfig `yaml:",flow"` // Fields to override per environment.

	// parser renders the manifest template; it is not part of the YAML document.
	parser template.Parser
}

// LoadBalancedWebServiceConfig holds the configuration for a load balanced web service.
type LoadBalancedWebServiceConfig struct {
	ImageConfig      ImageWithPortAndHealthcheck `yaml:"image,flow"`
	ImageOverride    `yaml:",inline"`
	HTTPOrBool       HTTPOrBool `yaml:"http,flow"`
	TaskConfig       `yaml:",inline"`
	Logging          `yaml:"logging,flow"`
	Sidecars         map[string]*SidecarConfig `yaml:"sidecars"` // NOTE: keep the pointers because `mergo` doesn't automatically deep merge map's value unless it's a pointer type.
	Network          NetworkConfig             `yaml:"network"`
	PublishConfig    PublishConfig             `yaml:"publish"`
	TaskDefOverrides []OverrideRule            `yaml:"taskdef_overrides"`
	NLBConfig        NetworkLoadBalancerConfiguration `yaml:"nlb"`
	DeployConfig     DeploymentConfig                 `yaml:"deployment"`
	Observability    Observability                    `yaml:"observability"`
}

// LoadBalancedWebServiceProps contains properties for creating a new load balanced fargate service manifest.
type LoadBalancedWebServiceProps struct {
	*WorkloadProps
	Path string // URL path the service serves, e.g. "/".
	Port uint16 // Container port exposed by the main image.

	HealthCheck ContainerHealthCheck // Optional healthcheck configuration.
	Platform    PlatformArgsOrString // Optional platform configuration.
}
// NewLoadBalancedWebService creates a new public load balanced web service, receives all the requests from the load balancer,
// has a single task with minimal CPU and memory thresholds, and sets the default health check path to "/".
func NewLoadBalancedWebService(props *LoadBalancedWebServiceProps) *LoadBalancedWebService {
	mft := newDefaultHTTPLoadBalancedWebService()
	cfg := &mft.LoadBalancedWebServiceConfig
	// Layer user-provided properties on top of the defaults.
	mft.Name = stringP(props.Name)
	cfg.ImageConfig.Image.Location = stringP(props.Image)
	cfg.ImageConfig.Image.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
	cfg.ImageConfig.Port = aws.Uint16(props.Port)
	cfg.ImageConfig.HealthCheck = props.HealthCheck
	cfg.Platform = props.Platform
	if isWindowsPlatform(props.Platform) {
		// Windows tasks require larger minimum CPU and memory reservations.
		cfg.TaskConfig.CPU = aws.Int(MinWindowsTaskCPU)
		cfg.TaskConfig.Memory = aws.Int(MinWindowsTaskMemory)
	}
	if props.Port == commonGRPCPort {
		// The conventional gRPC port implies a gRPC protocol version.
		log.Infof("Detected port %s, setting HTTP protocol version to %s in the manifest.\n",
			color.HighlightUserInput(strconv.Itoa(int(props.Port))), color.HighlightCode(GRPCProtocol))
		mft.HTTPOrBool.Main.ProtocolVersion = aws.String(GRPCProtocol)
	}
	mft.HTTPOrBool.Main.Path = aws.String(props.Path)
	mft.parser = template.New()
	for _, envName := range props.PrivateOnlyEnvironments {
		// Environments without public subnets must place tasks privately.
		mft.Environments[envName] = &LoadBalancedWebServiceConfig{
			Network: NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: placementStringP(PrivateSubnetPlacement),
					},
				},
			},
		}
	}
	return mft
}
// newDefaultHTTPLoadBalancedWebService returns an empty LoadBalancedWebService with only the default values set, including default HTTP configurations.
func newDefaultHTTPLoadBalancedWebService() *LoadBalancedWebService {
	svc := newDefaultLoadBalancedWebService()
	defaultRule := RoutingRule{
		HealthCheck: HealthCheckArgsOrString{
			Union: BasicToUnion[string, HTTPHealthCheckArgs](DefaultHealthCheckPath),
		},
	}
	svc.HTTPOrBool = HTTPOrBool{
		HTTP: HTTP{
			Main: defaultRule,
		},
	}
	return svc
}
// newDefaultLoadBalancedWebService returns an empty LoadBalancedWebService with only the default values set, without any load balancer configuration.
func newDefaultLoadBalancedWebService() *LoadBalancedWebService {
	defaultTask := TaskConfig{
		CPU:    aws.Int(256),
		Memory: aws.Int(512),
		Count: Count{
			Value: aws.Int(1),
			AdvancedCount: AdvancedCount{ // Leave advanced count empty while passing down the type of the workload.
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
		},
		ExecuteCommand: ExecuteCommand{
			Enable: aws.Bool(false),
		},
	}
	defaultNetwork := NetworkConfig{
		VPC: vpcConfig{
			Placement: PlacementArgOrString{
				PlacementString: placementStringP(PublicSubnetPlacement),
			},
		},
	}
	return &LoadBalancedWebService{
		Workload: Workload{
			Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
		},
		LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
			ImageConfig: ImageWithPortAndHealthcheck{},
			TaskConfig:  defaultTask,
			Network:     defaultNetwork,
		},
		Environments: map[string]*LoadBalancedWebServiceConfig{},
	}
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (s *LoadBalancedWebService) MarshalBinary() ([]byte, error) {
	buf, err := s.parser.Parse(lbWebSvcManifestPath, *s)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// requiredEnvironmentFeatures returns the environment features (ALB, NAT, EFS)
// that this manifest depends on.
func (s *LoadBalancedWebService) requiredEnvironmentFeatures() []string {
	var features []string
	if !s.HTTPOrBool.Disabled() {
		// Serving HTTP traffic requires the environment's ALB.
		features = append(features, template.ALBFeatureName)
	}
	features = append(features, s.Network.requiredEnvFeatures()...)
	return append(features, s.Storage.requiredEnvFeatures()...)
}
// Port returns the exposed port in the manifest.
// A LoadBalancedWebService always has a port exposed therefore the boolean is always true.
func (s *LoadBalancedWebService) Port() (port uint16, ok bool) {
	port, ok = aws.Uint16Value(s.ImageConfig.Port), true
	return port, ok
}
// Publish returns the list of topics where notifications can be published.
func (s *LoadBalancedWebService) Publish() []Topic {
	cfg := s.LoadBalancedWebServiceConfig.PublishConfig
	return cfg.publishedTopics()
}
// BuildArgs returns a docker.BuildArguments object given a context directory.
func (s *LoadBalancedWebService) BuildArgs(contextDir string) (map[string]*DockerBuildArgs, error) {
	needsBuild, err := requiresBuild(s.ImageConfig.Image)
	if err != nil {
		return nil, err
	}
	// Collects build args for the main container plus every sidecar image.
	perContainer := make(map[string]*DockerBuildArgs, len(s.Sidecars)+1)
	if needsBuild {
		perContainer[aws.StringValue(s.Name)] = s.ImageConfig.Image.BuildConfig(contextDir)
	}
	return buildArgs(contextDir, perContainer, s.Sidecars)
}
// EnvFiles returns the locations of all env files against the ws root directory.
// The returned map's keys are container names and its values are env file
// paths, or empty strings for containers without one.
func (s *LoadBalancedWebService) EnvFiles() map[string]string {
	taskCfg := s.TaskConfig
	return envFiles(s.Name, taskCfg, s.Logging, s.Sidecars)
}
// subnets returns a pointer to the service's subnet placement configuration.
func (s *LoadBalancedWebService) subnets() *SubnetListOrArgs {
	placement := &s.Network.VPC.Placement
	return &placement.Subnets
}
// applyEnv returns a copy of the service with the configuration for envName
// merged on top of the base configuration. The value receiver is deliberate:
// mergo mutates the copy, leaving the original manifest untouched.
func (s LoadBalancedWebService) applyEnv(envName string) (workloadManifest, error) {
	overrideConfig, exists := s.Environments[envName]
	if !exists || overrideConfig == nil {
		// No overrides (or an empty "env:" key) — return the copy unchanged.
		return &s, nil
	}
	// Apply overrides to the original service s.
	for _, t := range defaultTransformers {
		overlay := LoadBalancedWebService{
			LoadBalancedWebServiceConfig: *overrideConfig,
		}
		if err := mergo.Merge(&s, overlay, mergo.WithOverride, mergo.WithTransformers(t)); err != nil {
			return nil, err
		}
	}
	s.Environments = nil
	return &s, nil
}
// NetworkLoadBalancerConfiguration holds options for a network load balancer.
type NetworkLoadBalancerConfiguration struct {
	Listener            NetworkLoadBalancerListener   `yaml:",inline"` // The main (default) listener.
	Aliases             Alias                         `yaml:"alias"`
	AdditionalListeners []NetworkLoadBalancerListener `yaml:"additional_listeners"` // Extra listeners beyond the main one.
}

// NetworkLoadBalancerListener holds listener configuration for NLB.
type NetworkLoadBalancerListener struct {
	Port                *string            `yaml:"port"`
	HealthCheck         NLBHealthCheckArgs `yaml:"healthcheck"`
	TargetContainer     *string            `yaml:"target_container"` // Container that receives the traffic.
	TargetPort          *int               `yaml:"target_port"`      // Container port that receives the traffic.
	SSLPolicy           *string            `yaml:"ssl_policy"`
	Stickiness          *bool              `yaml:"stickiness"`
	DeregistrationDelay *time.Duration     `yaml:"deregistration_delay"`
}
// IsEmpty returns true if NetworkLoadBalancerConfiguration is empty.
func (c *NetworkLoadBalancerConfiguration) IsEmpty() bool {
	return len(c.AdditionalListeners) == 0 && c.Listener.IsEmpty() && c.Aliases.IsEmpty()
}

// IsEmpty returns true if NetworkLoadBalancerListener is empty.
func (c *NetworkLoadBalancerListener) IsEmpty() bool {
	switch {
	case c.Port != nil, c.TargetContainer != nil, c.TargetPort != nil:
		return false
	case c.SSLPolicy != nil, c.Stickiness != nil, c.DeregistrationDelay != nil:
		return false
	default:
		return c.HealthCheck.isEmpty()
	}
}
// ExposedPorts returns all the ports that are container ports available to receive traffic.
func (lbws *LoadBalancedWebService) ExposedPorts() (ExposedPortsIndex, error) {
	workloadName := aws.StringValue(lbws.Name)
	// Port from image.port.
	ports := append([]ExposedPort(nil), lbws.ImageConfig.exposedPorts(workloadName)...)
	// Ports from sidecar[x].image.port.
	for name, sidecar := range lbws.Sidecars {
		sidecarPorts, err := sidecar.exposedPorts(name)
		if err != nil {
			return ExposedPortsIndex{}, err
		}
		ports = append(ports, sidecarPorts...)
	}
	// Ports from http.target_port and http.additional_rules[x].target_port.
	for _, rule := range lbws.HTTPOrBool.RoutingRules() {
		ports = append(ports, rule.exposedPorts(ports, workloadName)...)
	}
	// Ports from nlb.target_port and nlb.additional_listeners[x].target_port.
	for _, listener := range lbws.NLBConfig.NLBListeners() {
		listenerPorts, err := listener.exposedPorts(ports, workloadName)
		if err != nil {
			return ExposedPortsIndex{}, err
		}
		ports = append(ports, listenerPorts...)
	}
	portsForContainer, containerForPort := prepareParsedExposedPortsMap(sortExposedPorts(ports))
	return ExposedPortsIndex{
		WorkloadName:      workloadName,
		PortsForContainer: portsForContainer,
		ContainerForPort:  containerForPort,
	}, nil
}
// NLBListeners returns main as well as additional listeners as a list of NetworkLoadBalancerListener.
func (cfg NetworkLoadBalancerConfiguration) NLBListeners() []NetworkLoadBalancerListener {
	if cfg.IsEmpty() {
		return nil
	}
	listeners := make([]NetworkLoadBalancerListener, 0, len(cfg.AdditionalListeners)+1)
	listeners = append(listeners, cfg.Listener)
	return append(listeners, cfg.AdditionalListeners...)
}
| 315 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"fmt"
"path/filepath"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestNewHTTPLoadBalancedWebService verifies that NewLoadBalancedWebService fills in
// default settings (CPU, memory, count, execute-command, public subnet placement, and
// the "/" health check) and that caller-provided optional properties override them.
func TestNewHTTPLoadBalancedWebService(t *testing.T) {
	testCases := map[string]struct {
		props  LoadBalancedWebServiceProps // Input properties for the new manifest.
		wanted *LoadBalancedWebService     // Expected generated manifest.
	}{
		"initializes with default settings when only required configuration is provided": {
			props: LoadBalancedWebServiceProps{
				WorkloadProps: &WorkloadProps{
					Name:       "frontend",
					Dockerfile: "./Dockerfile",
				},
				Path: "/",
				Port: 80,
			},
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: stringP("frontend"),
					Type: stringP("Load Balanced Web Service"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: stringP("./Dockerfile"),
										},
									},
								},
							},
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
								},
							},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
							AdvancedCount: AdvancedCount{
								workloadType: "Load Balanced Web Service",
							},
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{},
			},
		},
		"overrides default settings when optional configuration is provided": {
			props: LoadBalancedWebServiceProps{
				WorkloadProps: &WorkloadProps{
					Name:       "subscribers",
					Dockerfile: "./subscribers/Dockerfile",
					// Private-only environments get a per-environment override
					// with private subnet placement in the generated manifest.
					PrivateOnlyEnvironments: []string{
						"metrics",
					},
				},
				Path: "/",
				Port: 80,
				HealthCheck: ContainerHealthCheck{
					Command: []string{"CMD", "curl -f http://localhost:8080 || exit 1"},
				},
				Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/amd64"))},
			},
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: stringP("subscribers"),
					Type: stringP(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./subscribers/Dockerfile"),
										},
									},
								},
							},
							Port: aws.Uint16(80),
						},
						HealthCheck: ContainerHealthCheck{
							Command: []string{"CMD", "curl -f http://localhost:8080 || exit 1"},
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
								},
							},
						},
					},
					TaskConfig: TaskConfig{
						// Windows tasks get larger default CPU/memory.
						CPU:    aws.Int(1024),
						Memory: aws.Int(2048),
						Platform: PlatformArgsOrString{
							PlatformString: (*PlatformString)(aws.String("windows/amd64")),
							PlatformArgs: PlatformArgs{
								OSFamily: nil,
								Arch:     nil,
							},
						},
						Count: Count{
							Value: aws.Int(1),
							AdvancedCount: AdvancedCount{
								workloadType: "Load Balanced Web Service",
							},
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"metrics": {
						Network: NetworkConfig{
							VPC: vpcConfig{
								Placement: PlacementArgOrString{
									PlacementString: placementStringP(PrivateSubnetPlacement),
								},
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			manifest := NewLoadBalancedWebService(&tc.props)

			// THEN
			require.Equal(t, tc.wanted.Workload, manifest.Workload)
			require.Equal(t, tc.wanted.LoadBalancedWebServiceConfig, manifest.LoadBalancedWebServiceConfig)
			require.Equal(t, tc.wanted.Environments, manifest.Environments)
		})
	}
}
// TestNewLoadBalancedWebService_UnmarshalYaml verifies that the `healthcheck` field
// unmarshals from both of its YAML shapes: a bare path string and the advanced
// configuration map (path, thresholds, interval, timeout).
func TestNewLoadBalancedWebService_UnmarshalYaml(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte                   // Raw YAML to unmarshal.
		wantedStruct HealthCheckArgsOrString  // Expected parsed health check union.
	}{
		"non-args path string": {
			inContent: []byte(` healthcheck: /testing`),
			wantedStruct: HealthCheckArgsOrString{
				Union: BasicToUnion[string, HTTPHealthCheckArgs]("/testing"),
			},
		},
		"should use custom healthcheck configuration when provided and set default path to nil": {
			// NOTE(review): the in-string indentation below was reconstructed;
			// any indentation where children are deeper than `healthcheck:` is valid YAML.
			inContent: []byte(` healthcheck:
  path: /testing
  healthy_threshold: 5
  unhealthy_threshold: 6
  interval: 78s
  timeout: 9s`),
			wantedStruct: HealthCheckArgsOrString{
				Union: AdvancedToUnion[string](HTTPHealthCheckArgs{
					Path:               aws.String("/testing"),
					HealthyThreshold:   aws.Int64(5),
					UnhealthyThreshold: aws.Int64(6),
					Interval:           durationp(78 * time.Second),
					Timeout:            durationp(9 * time.Second),
				}),
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// Start from the default manifest's routing config and unmarshal on top of it.
			rr := newDefaultHTTPLoadBalancedWebService().HTTPOrBool
			err := yaml.Unmarshal(tc.inContent, &rr)
			require.NoError(t, err)
			require.Equal(t, tc.wantedStruct.Advanced, rr.Main.HealthCheck.Advanced)
			require.Equal(t, tc.wantedStruct.Advanced.Path, rr.Main.HealthCheck.Advanced.Path)
			require.Equal(t, tc.wantedStruct.Advanced.HealthyThreshold, rr.Main.HealthCheck.Advanced.HealthyThreshold)
			require.Equal(t, tc.wantedStruct.Advanced.UnhealthyThreshold, rr.Main.HealthCheck.Advanced.UnhealthyThreshold)
			require.Equal(t, tc.wantedStruct.Advanced.Interval, rr.Main.HealthCheck.Advanced.Interval)
			require.Equal(t, tc.wantedStruct.Advanced.Timeout, rr.Main.HealthCheck.Advanced.Timeout)
		})
	}
}
// TestLoadBalancedWebService_ApplyEnv verifies how configuration declared under
// `environments.<name>` is merged on top of the base manifest: which fields are
// overridden, which base values are preserved, and how mutually-exclusive fields
// (count value vs. spot, range vs. range config, image build vs. location)
// replace each other.
func TestLoadBalancedWebService_ApplyEnv(t *testing.T) {
	// Shared fixtures referenced by multiple cases below.
	var (
		perc       = Percentage(80)
		mockIPNet1 = IPNet("10.1.0.0/24")
		mockIPNet2 = IPNet("10.1.1.0/24")
		mockRange  = IntRangeBand("1-10")
		mockConfig = ScalingConfigOrT[Percentage]{
			Value: &perc,
		}
	)
	testCases := map[string]struct {
		in         *LoadBalancedWebService // Base manifest, possibly with per-environment overrides.
		envToApply string                  // Name of the environment whose overrides are applied.
		wanted     *LoadBalancedWebService // Expected merged manifest.
	}{
		// Applying an environment that has no override section leaves the manifest unchanged.
		"with no existing environments": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./Dockerfile"),
										},
									},
								},
							},
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: aws.String("/awards/*"),
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
								},
							},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(1024),
						Memory: aws.Int(1024),
						Count: Count{
							Value: aws.Int(1),
						},
						Variables: map[string]Variable{
							"VAR1": {
								stringOrFromCFN{
									Plain: stringP("var1"),
								},
							},
							"VAR2": {
								stringOrFromCFN{
									FromCFN: fromCFN{
										Name: stringP("import-var2"),
									},
								},
							},
						},
						Storage: Storage{
							Volumes: map[string]*Volume{
								"myEFSVolume": {
									MountPointOpts: MountPointOpts{
										ContainerPath: aws.String("/path/to/files"),
										ReadOnly:      aws.Bool(false),
									},
									EFS: EFSConfigOrBool{
										Advanced: EFSVolumeConfiguration{
											FileSystemID: aws.String("fs-1234"),
										},
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./Dockerfile"),
										},
									},
								},
							},
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: aws.String("/awards/*"),
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
								},
							},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(1024),
						Memory: aws.Int(1024),
						Count: Count{
							Value: aws.Int(1),
						},
						Variables: map[string]Variable{
							"VAR1": {
								stringOrFromCFN{
									Plain: stringP("var1"),
								},
							},
							"VAR2": {
								stringOrFromCFN{
									FromCFN: fromCFN{
										Name: stringP("import-var2"),
									},
								},
							},
						},
						Storage: Storage{
							Volumes: map[string]*Volume{
								"myEFSVolume": {
									MountPointOpts: MountPointOpts{
										ContainerPath: aws.String("/path/to/files"),
										ReadOnly:      aws.Bool(false),
									},
									EFS: EFSConfigOrBool{
										Advanced: EFSVolumeConfiguration{
											FileSystemID: aws.String("fs-1234"),
										},
									},
								},
							},
						},
					},
				},
			},
		},
		// Broad merge: environment overrides replace scalars and are deep-merged
		// with maps (variables, secrets, volumes, sidecars, logging, network).
		"with overrides": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./Dockerfile"),
										},
									},
								},
							},
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: aws.String("/awards/*"),
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
								},
							},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(1024),
						Memory: aws.Int(1024),
						Count: Count{
							Value: aws.Int(1),
						},
						Variables: map[string]Variable{
							"LOG_LEVEL": {
								stringOrFromCFN{
									Plain: stringP("DEBUG"),
								},
							},
							"S3_TABLE_NAME": {
								stringOrFromCFN{
									Plain: stringP("doggo"),
								},
							},
							"RDS_TABLE_NAME": {
								stringOrFromCFN{
									FromCFN: fromCFN{
										Name: stringP("duckling"),
									},
								},
							},
							"DDB_TABLE_NAME": {
								stringOrFromCFN{
									FromCFN: fromCFN{
										Name: stringP("awards"),
									},
								},
							},
						},
						Secrets: map[string]Secret{
							"GITHUB_TOKEN": {
								from: stringOrFromCFN{
									Plain: aws.String("1111"),
								},
							},
							"TWILIO_TOKEN": {
								from: stringOrFromCFN{
									Plain: aws.String("1111"),
								},
							},
						},
						Storage: Storage{
							Volumes: map[string]*Volume{
								"myEFSVolume": {
									MountPointOpts: MountPointOpts{
										ContainerPath: aws.String("/path/to/files"),
										ReadOnly:      aws.Bool(false),
									},
									EFS: EFSConfigOrBool{
										Advanced: EFSVolumeConfiguration{
											FileSystemID: aws.String("fs-1234"),
											AuthConfig: AuthorizationConfig{
												IAM:           aws.Bool(true),
												AccessPointID: aws.String("ap-1234"),
											},
										},
									},
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
					Logging: Logging{
						ConfigFile: aws.String("mockConfigFile"),
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
							SecurityGroups: SecurityGroupsIDsOrConfig{
								IDs: []stringOrFromCFN{{
									Plain: aws.String("sg-123"),
								}},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPortAndHealthcheck{
							ImageWithPort: ImageWithPort{
								Image: Image{
									ImageLocationOrBuild: ImageLocationOrBuild{
										Build: BuildArgsOrString{
											BuildArgs: DockerBuildArgs{
												Dockerfile: aws.String("./RealDockerfile"),
											},
										},
									},
								},
								Port: aws.Uint16(5000),
							},
						},
						HTTPOrBool: HTTPOrBool{
							HTTP: HTTP{
								Main: RoutingRule{
									TargetContainer: aws.String("xray"),
								},
							},
						},
						TaskConfig: TaskConfig{
							CPU: aws.Int(2046),
							Count: Count{
								Value: aws.Int(0),
							},
							Variables: map[string]Variable{
								"LOG_LEVEL": {
									stringOrFromCFN{
										Plain: stringP("ERROR"),
									},
								},
								"S3_TABLE_NAME": {
									stringOrFromCFN{
										FromCFN: fromCFN{Name: stringP("prod-doggo")},
									},
								},
								"RDS_TABLE_NAME": {
									stringOrFromCFN{Plain: stringP("duckling-prod")},
								},
								"DDB_TABLE_NAME": {
									stringOrFromCFN{
										FromCFN: fromCFN{Name: stringP("awards-prod")},
									},
								},
							},
							Storage: Storage{
								Volumes: map[string]*Volume{
									"myEFSVolume": {
										EFS: EFSConfigOrBool{
											Advanced: EFSVolumeConfiguration{
												FileSystemID: aws.String("fs-5678"),
												AuthConfig: AuthorizationConfig{
													AccessPointID: aws.String("ap-5678"),
												},
											},
										},
									},
								},
							},
						},
						Sidecars: map[string]*SidecarConfig{
							"xray": {
								Port: aws.String("2000/udp"),
								MountPoints: []SidecarMountPoint{
									{
										SourceVolume: aws.String("myEFSVolume"),
										MountPointOpts: MountPointOpts{
											ReadOnly:      aws.Bool(true),
											ContainerPath: aws.String("/var/www"),
										},
									},
								},
							},
						},
						Logging: Logging{
							SecretOptions: map[string]Secret{
								"FOO": {
									from: stringOrFromCFN{
										Plain: aws.String("BAR"),
									},
								},
							},
						},
						Network: NetworkConfig{
							VPC: vpcConfig{
								SecurityGroups: SecurityGroupsIDsOrConfig{
									IDs: []stringOrFromCFN{
										{
											Plain: aws.String("sg-456"),
										},
										{
											Plain: aws.String("sg-789"),
										},
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./RealDockerfile"),
										},
									},
								},
							},
							Port: aws.Uint16(5000),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: aws.String("/awards/*"),
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
								},
								TargetContainer: aws.String("xray"),
							},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(2046),
						Memory: aws.Int(1024),
						Count: Count{
							Value: aws.Int(0),
						},
						Variables: map[string]Variable{
							"LOG_LEVEL": {
								stringOrFromCFN{
									Plain: stringP("ERROR"),
								},
							},
							"S3_TABLE_NAME": {
								stringOrFromCFN{
									FromCFN: fromCFN{Name: stringP("prod-doggo")},
								},
							},
							"RDS_TABLE_NAME": {
								stringOrFromCFN{
									Plain: stringP("duckling-prod"),
								},
							},
							"DDB_TABLE_NAME": {
								stringOrFromCFN{
									FromCFN: fromCFN{Name: stringP("awards-prod")},
								},
							},
						},
						Secrets: map[string]Secret{
							"GITHUB_TOKEN": {
								from: stringOrFromCFN{
									Plain: aws.String("1111"),
								},
							},
							"TWILIO_TOKEN": {
								from: stringOrFromCFN{
									Plain: aws.String("1111"),
								},
							},
						},
						Storage: Storage{
							Volumes: map[string]*Volume{
								"myEFSVolume": {
									MountPointOpts: MountPointOpts{
										ContainerPath: aws.String("/path/to/files"),
										ReadOnly:      aws.Bool(false),
									},
									EFS: EFSConfigOrBool{
										Advanced: EFSVolumeConfiguration{
											FileSystemID: aws.String("fs-5678"),
											AuthConfig: AuthorizationConfig{
												IAM:           aws.Bool(true),
												AccessPointID: aws.String("ap-5678"),
											},
										},
									},
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000/udp"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
							MountPoints: []SidecarMountPoint{
								{
									SourceVolume: aws.String("myEFSVolume"),
									MountPointOpts: MountPointOpts{
										ReadOnly:      aws.Bool(true),
										ContainerPath: aws.String("/var/www"),
									},
								},
							},
						},
					},
					Logging: Logging{
						ConfigFile: aws.String("mockConfigFile"),
						SecretOptions: map[string]Secret{
							"FOO": {
								from: stringOrFromCFN{
									Plain: aws.String("BAR"),
								},
							},
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
							SecurityGroups: SecurityGroupsIDsOrConfig{
								IDs: []stringOrFromCFN{
									{
										Plain: aws.String("sg-456"),
									},
									{
										Plain: aws.String("sg-789"),
									},
								},
							},
						},
					},
				},
			},
		},
		// A nil override section for the target environment is a no-op.
		"with empty env override": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
								CPU:   mockConfig,
							},
						},
						Variables: map[string]Variable{
							"VAR1": {
								stringOrFromCFN{
									Plain: stringP("var1"),
								},
							},
							"VAR2": {
								stringOrFromCFN{
									FromCFN: fromCFN{Name: stringP("import-var2")},
								},
							},
						},
					},
					ImageOverride: ImageOverride{
						Command: CommandOverride{
							StringSlice: []string{"command", "default"},
						},
						EntryPoint: EntryPointOverride{
							StringSlice: []string{"entrypoint", "default"},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": nil,
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							Value: nil,
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
								CPU:   mockConfig,
							},
						},
						Variables: map[string]Variable{
							"VAR1": {
								stringOrFromCFN{
									Plain: stringP("var1"),
								},
							},
							"VAR2": {
								stringOrFromCFN{
									FromCFN: fromCFN{Name: stringP("import-var2")},
								},
							},
						},
					},
					ImageOverride: ImageOverride{
						Command: CommandOverride{
							StringSlice: []string{"command", "default"},
						},
						EntryPoint: EntryPointOverride{
							StringSlice: []string{"entrypoint", "default"},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": nil,
				},
			},
		},
		"with range override and preserving network config": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
								CPU:   mockConfig,
							},
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
							SecurityGroups: SecurityGroupsIDsOrConfig{
								IDs: []stringOrFromCFN{
									{
										Plain: aws.String("sg-456"),
									},
									{
										Plain: aws.String("sg-789"),
									},
								},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							Value: nil,
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
								CPU:   mockConfig,
							},
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
							SecurityGroups: SecurityGroupsIDsOrConfig{
								IDs: []stringOrFromCFN{
									{
										Plain: aws.String("sg-456"),
									},
									{
										Plain: aws.String("sg-789"),
									},
								},
							},
						},
					},
				},
			},
		},
		"with network config overridden by security group config": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
							SecurityGroups: SecurityGroupsIDsOrConfig{
								AdvancedConfig: SecurityGroupsConfig{
									SecurityGroups: []stringOrFromCFN{
										{
											Plain: aws.String("sg-535"),
										},
										{
											Plain: aws.String("sg-789"),
										},
									},
								},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						Network: NetworkConfig{
							VPC: vpcConfig{
								SecurityGroups: SecurityGroupsIDsOrConfig{
									AdvancedConfig: SecurityGroupsConfig{
										SecurityGroups: []stringOrFromCFN{
											{
												Plain: aws.String("sg-456"),
											},
											{
												Plain: aws.String("sg-700"),
											},
										},
										DenyDefault: aws.Bool(true),
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
							SecurityGroups: SecurityGroupsIDsOrConfig{
								AdvancedConfig: SecurityGroupsConfig{
									SecurityGroups: []stringOrFromCFN{
										{
											Plain: aws.String("sg-456"),
										},
										{
											Plain: aws.String("sg-700"),
										},
									},
									DenyDefault: aws.Bool(true),
								},
							},
						},
					},
				},
			},
		},
		// The following cases pin how the mutually-exclusive count fields interact.
		"with count value overridden by count value": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							Value: aws.Int(5),
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						TaskConfig: TaskConfig{
							Count: Count{
								Value: aws.Int(7),
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							Value: aws.Int(7),
						},
					},
				},
			},
		},
		"with count value overridden by spot count": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{Value: aws.Int(3)},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						TaskConfig: TaskConfig{
							Count: Count{
								AdvancedCount: AdvancedCount{
									Spot: aws.Int(6),
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot: aws.Int(6),
							},
						},
					},
				},
			},
		},
		"with range overridden by spot count": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						TaskConfig: TaskConfig{
							Count: Count{
								AdvancedCount: AdvancedCount{
									Spot: aws.Int(5),
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot: aws.Int(5),
							},
						},
					},
				},
			},
		},
		"with range overridden by range config": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						TaskConfig: TaskConfig{
							Count: Count{
								AdvancedCount: AdvancedCount{
									Range: Range{
										RangeConfig: RangeConfig{
											Min: aws.Int(2),
											Max: aws.Int(8),
										},
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{
									RangeConfig: RangeConfig{
										Min: aws.Int(2),
										Max: aws.Int(8),
									},
								},
							},
						},
					},
				},
			},
		},
		"with spot overridden by count value": {
			in: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot: aws.Int(5),
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						TaskConfig: TaskConfig{
							Count: Count{Value: aws.Int(15)},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{Value: aws.Int(15)},
					},
				},
			},
		},
		// The following cases pin how the mutually-exclusive image fields interact.
		"with image build overridden by image location": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./Dockerfile"),
										},
									},
								},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPortAndHealthcheck{
							ImageWithPort: ImageWithPort{
								Image: Image{
									ImageLocationOrBuild: ImageLocationOrBuild{
										Location: aws.String("env-override location"),
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("env-override location"),
								},
							},
						},
					},
				},
			},
		},
		"with image location overridden by image location": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("default location"),
								},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPortAndHealthcheck{
							ImageWithPort: ImageWithPort{
								Image: Image{
									ImageLocationOrBuild: ImageLocationOrBuild{
										Location: aws.String("env-override location"),
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("env-override location"),
								},
							},
						},
					},
				},
			},
		},
		"with image build overridden by image build": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildArgs: DockerBuildArgs{
											Dockerfile: aws.String("./Dockerfile"),
										},
									},
								},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPortAndHealthcheck{
							ImageWithPort: ImageWithPort{
								Image: Image{
									ImageLocationOrBuild: ImageLocationOrBuild{
										Build: BuildArgsOrString{
											BuildString: aws.String("overridden build string"),
										},
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("overridden build string"),
									},
								},
							},
						},
					},
				},
			},
		},
		"with image location overridden by image build": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("default location"),
								},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPortAndHealthcheck{
							ImageWithPort: ImageWithPort{
								Image: Image{
									ImageLocationOrBuild: ImageLocationOrBuild{
										Build: BuildArgsOrString{
											BuildString: aws.String("overridden build string"),
										},
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("overridden build string"),
									},
								},
							},
						},
					},
				},
			},
		},
		"with command and entrypoint overridden": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageOverride: ImageOverride{
						Command: CommandOverride{
							StringSlice: []string{"command", "default"},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						ImageOverride: ImageOverride{
							Command: CommandOverride{
								StringSlice: []string{"command", "prod"},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageOverride: ImageOverride{
						Command: CommandOverride{
							StringSlice: []string{"command", "prod"},
						},
					},
				},
			},
		},
		"with routing rule overridden": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("path"),
								},
								AllowedSourceIps: []IPNet{mockIPNet1},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						HTTPOrBool: HTTPOrBool{
							HTTP: HTTP{
								Main: RoutingRule{
									AllowedSourceIps: []IPNet{mockIPNet2},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("path"),
								},
								AllowedSourceIps: []IPNet{mockIPNet2},
							},
						},
					},
				},
			},
		},
		"with routing rule overridden without allowed source ips": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("path"),
								},
								AllowedSourceIps: []IPNet{mockIPNet1, mockIPNet2},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						HTTPOrBool: HTTPOrBool{
							HTTP: HTTP{
								Main: RoutingRule{
									HealthCheck: HealthCheckArgsOrString{
										Union: BasicToUnion[string, HTTPHealthCheckArgs]("another-path"),
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("another-path"),
								},
								AllowedSourceIps: []IPNet{mockIPNet1, mockIPNet2},
							},
						},
					},
				},
			},
		},
		"with routing rule overridden without empty allowed source ips": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("path"),
								},
								AllowedSourceIps: []IPNet{mockIPNet1, mockIPNet2},
							},
						},
					},
				},
				Environments: map[string]*LoadBalancedWebServiceConfig{
					"prod-iad": {
						HTTPOrBool: HTTPOrBool{
							HTTP: HTTP{
								Main: RoutingRule{
									HealthCheck: HealthCheckArgsOrString{
										Union: BasicToUnion[string, HTTPHealthCheckArgs]("another-path"),
									},
									AllowedSourceIps: []IPNet{},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								HealthCheck: HealthCheckArgsOrString{
									Union: BasicToUnion[string, HTTPHealthCheckArgs]("another-path"),
								},
								AllowedSourceIps: []IPNet{},
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			conf, _ := tc.in.applyEnv(tc.envToApply)

			// THEN
			require.Equal(t, tc.wanted, conf, "returned configuration should have overrides from the environment")
		})
	}
}
// TestLoadBalancedWebService_Port checks that Port reports the container port
// configured under image.port.
func TestLoadBalancedWebService_Port(t *testing.T) {
	// GIVEN
	svc := LoadBalancedWebService{
		LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
			ImageConfig: ImageWithPortAndHealthcheck{
				ImageWithPort: ImageWithPort{
					Port: uint16P(80),
				},
			},
		},
	}

	// WHEN
	port, ok := svc.Port()

	// THEN
	require.True(t, ok)
	require.Equal(t, uint16(80), port)
}
// TestLoadBalancedWebService_Publish checks that Publish surfaces the SNS topics
// declared under the manifest's publish configuration, and returns nil otherwise.
func TestLoadBalancedWebService_Publish(t *testing.T) {
	testCases := map[string]struct {
		manifest *LoadBalancedWebService
		wanted   []Topic
	}{
		"returns nil if there are no topics set": {
			manifest: &LoadBalancedWebService{},
		},
		"returns the list of topics if manifest publishes notifications": {
			manifest: &LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{Name: stringP("hello")},
						},
					},
				},
			},
			wanted: []Topic{
				{Name: stringP("hello")},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.manifest.Publish()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestNetworkLoadBalancerConfiguration_IsEmpty verifies that IsEmpty reports
// true only when no NLB settings are present.
func TestNetworkLoadBalancerConfiguration_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		in     NetworkLoadBalancerConfiguration
		wanted bool
	}{
		"empty": {
			in:     NetworkLoadBalancerConfiguration{},
			wanted: true,
		},
		"non empty": {
			// wanted defaults to false: a configured listener means "not empty".
			in: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443"),
				},
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.IsEmpty())
		})
	}
}
// TestLoadBalancedWebService_RequiredEnvironmentFeatures checks which environment
// features a Load Balanced Web Service manifest requires (ALB by default, NAT for
// private placement, EFS for managed volumes), based on how each case mutates a
// baseline manifest.
func TestLoadBalancedWebService_RequiredEnvironmentFeatures(t *testing.T) {
	testCases := map[string]struct {
		mft    func(svc *LoadBalancedWebService)
		wanted []string
	}{
		"no feature required": {
			// Disabling HTTP removes the default ALB requirement.
			mft: func(svc *LoadBalancedWebService) {
				svc.HTTPOrBool = HTTPOrBool{
					Enabled: aws.Bool(false),
				}
			},
		},
		"alb feature required by default": {
			mft:    func(svc *LoadBalancedWebService) {},
			wanted: []string{template.ALBFeatureName},
		},
		"nat feature required": {
			// Private subnet placement adds the NAT gateway requirement.
			mft: func(svc *LoadBalancedWebService) {
				svc.Network = NetworkConfig{
					VPC: vpcConfig{
						Placement: PlacementArgOrString{
							PlacementString: placementStringP(PrivateSubnetPlacement),
						},
					},
				}
			},
			wanted: []string{template.ALBFeatureName, template.NATFeatureName},
		},
		"efs feature required by enabling managed volume with bool": {
			mft: func(svc *LoadBalancedWebService) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-managed-volume-1": {
							EFS: EFSConfigOrBool{
								Enabled: aws.Bool(true),
							},
						},
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
			wanted: []string{template.ALBFeatureName, template.EFSFeatureName},
		},
		"efs feature required by enabling managed volume with uid or gid": {
			// Setting a UID implies a Copilot-managed volume even without Enabled.
			mft: func(svc *LoadBalancedWebService) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-managed-volume-1": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									UID: aws.Uint32(1),
								},
							},
						},
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
			wanted: []string{template.ALBFeatureName, template.EFSFeatureName},
		},
		"efs feature not required because storage is imported": {
			// An existing filesystem ID means the volume is imported, not managed.
			mft: func(svc *LoadBalancedWebService) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
			wanted: []string{template.ALBFeatureName},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// Start from a baseline manifest, apply the case's mutation, then
			// compute the required features.
			inSvc := LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mock-svc"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
			}
			tc.mft(&inSvc)
			got := inSvc.requiredEnvironmentFeatures()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestLoadBalancedWebService_ExposedPorts validates how ExposedPorts merges the
// ports of the main container, sidecars, ALB routing rules (main and additional
// rules), and NLB listeners into a per-container port map. Ports declared on a
// container itself carry isDefinedByContainer: true; ports introduced only by a
// load balancer target do not, and duplicates are collapsed.
func TestLoadBalancedWebService_ExposedPorts(t *testing.T) {
	testCases := map[string]struct {
		mft                *LoadBalancedWebService
		wantedExposedPorts map[string][]ExposedPort
	}{
		"expose new sidecar container port through alb target_port and target_container": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:            aws.String("/"),
								TargetContainer: aws.String("xray"),
								TargetPort:      aws.Uint16(81),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:          81,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
					{
						Port:                 2000,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"expose new primary container port through alb target_port": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(81),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          81,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 2000,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"expose new primary container port through alb target_port and target_container": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:            aws.String("/"),
								TargetContainer: aws.String("frontend"),
								TargetPort:      aws.Uint16(81),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          81,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 2000,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"expose sidecar container port through alb target_port": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:            aws.String("/"),
								TargetContainer: aws.String("xray"),
								TargetPort:      aws.Uint16(81),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:          81,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
				},
			},
		},
		"reference existing sidecar container port through alb target_port": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(81),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("81"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:                 81,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"reference existing primary container port through alb target_port": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(80),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("81"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:                 81,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"ALB exposing multiple main container ports through additional_rules": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(80),
							},
							AdditionalRoutingRules: []RoutingRule{
								{
									Path:       stringP("/admin"),
									TargetPort: uint16P(81),
								},
								{
									Path:            stringP("/additional"),
									TargetPort:      uint16P(82),
									TargetContainer: stringP("frontend"),
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("85"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          81,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
					{
						Port:          82,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 85,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"ALB exposing multiple sidecar ports through additional_rules": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(80),
							},
							AdditionalRoutingRules: []RoutingRule{
								{
									Path:            stringP("/admin"),
									TargetContainer: stringP("xray"),
								},
								{
									Path:            stringP("/additional"),
									TargetPort:      uint16P(82),
									TargetContainer: stringP("xray"),
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("81"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:                 81,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          82,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
				},
			},
		},
		"ALB exposing multiple main as well as sidecar ports through additional_rules": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(80),
							},
							AdditionalRoutingRules: []RoutingRule{
								{
									Path:            stringP("/sidecaradmin"),
									TargetContainer: stringP("xray"),
								},
								{
									Path:       stringP("/sidecaradmin1"),
									TargetPort: uint16P(81),
								},
								{
									Path:            stringP("/additionalsidecar"),
									TargetPort:      uint16P(82),
									TargetContainer: stringP("xray"),
								},
								{
									Path:            stringP("/mainadmin"),
									TargetContainer: stringP("frontend"),
								},
								{
									Path:       stringP("/mainadmin1"),
									TargetPort: uint16P(85),
								},
								{
									Path:            stringP("/additionalmaincontainer"),
									TargetPort:      uint16P(86),
									TargetContainer: stringP("frontend"),
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("81"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          85,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
					{
						Port:          86,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 81,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          82,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
				},
			},
		},
		"ALB and NLB exposes the same additional port on the main container": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(81),
							},
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:       aws.String("85"),
							TargetPort: aws.Int(81),
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			// Port 81 appears once even though both load balancers target it.
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          81,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 2000,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"ALB and NLB exposes two different ports on the main container": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								TargetPort: aws.Uint16(81),
							},
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("82"),
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          81,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
					{
						Port:          82,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 2000,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"expose new primary container port through NLB config": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(80),
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("82"),
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 80,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          82,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 2000,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"alb and nlb pointing to the same primary container port": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(8080),
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("8080"),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(8080),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("80"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"xray": {
					{
						Port:                 80,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"frontend": {
					{
						Port:                 8080,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
		"alb and nlb exposing new ports of the main and sidecar containers": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(8080),
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:            aws.String("8082/tcp"),
							TargetContainer: aws.String("xray"),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:       aws.String("/"),
								TargetPort: aws.Uint16(8081),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("80"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 8080,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          8081,
						ContainerName: "frontend",
						Protocol:      "tcp",
					},
				},
				"xray": {
					{
						Port:                 80,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          8082,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
				},
			},
		},
		"nlb exposing new ports of the main and sidecar containers through main and additional listeners": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(8080),
						},
					},
					HTTPOrBool: HTTPOrBool{
						Enabled: aws.Bool(false),
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("80"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:            aws.String("8081/tcp"),
							TargetContainer: aws.String("xray"),
						},
						AdditionalListeners: []NetworkLoadBalancerListener{
							{
								Port:            aws.String("8082/tls"),
								TargetPort:      aws.Int(8083),
								TargetContainer: aws.String("xray"),
							},
						},
					},
				},
			},
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 8080,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:                 80,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
					{
						Port:          8081,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
					{
						Port:          8083,
						ContainerName: "xray",
						Protocol:      "tcp",
					},
				},
			},
		},
		"nlb exposing new ports of the main and sidecar containers through main and additional listeners without mentioning the target_port or target_container": {
			mft: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Port: aws.Uint16(8080),
						},
					},
					HTTPOrBool: HTTPOrBool{
						Enabled: aws.Bool(false),
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("80"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("8080/tcp"),
						},
						AdditionalListeners: []NetworkLoadBalancerListener{
							{
								Port: aws.String("80/tcp"),
							},
						},
					},
				},
			},
			// Listener ports match ports already defined by the containers, so no
			// new entries are added.
			wantedExposedPorts: map[string][]ExposedPort{
				"frontend": {
					{
						Port:                 8080,
						ContainerName:        "frontend",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
				"xray": {
					{
						Port:                 80,
						ContainerName:        "xray",
						Protocol:             "tcp",
						isDefinedByContainer: true,
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			actual, err := tc.mft.ExposedPorts()
			// THEN
			require.NoError(t, err)
			require.Equal(t, tc.wantedExposedPorts, actual.PortsForContainer)
		})
	}
}
// TestLoadBalancedWebService_BuildArgs verifies that BuildArgs resolves the
// Dockerfile and build-context paths for the main container and its sidecars,
// and that it errors when both "image.build" and "image.location" are set.
func TestLoadBalancedWebService_BuildArgs(t *testing.T) {
	mockContextDir := "/root/dir"
	testCases := map[string]struct {
		in              *LoadBalancedWebService
		wantedBuildArgs map[string]*DockerBuildArgs
		wantedErr       error
	}{
		"error if both build and location are set": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mock-svc"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("web/Dockerfile"),
									},
									Location: aws.String("mockURI"),
								},
							},
						},
					},
				},
			},
			wantedErr: fmt.Errorf(`either "image.build" or "image.location" needs to be specified in the manifest`),
		},
		"return main container and sidecar container build args": {
			in: &LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mock-svc"),
					Type: aws.String(manifestinfo.LoadBalancedWebServiceType),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("web/Dockerfile"),
									},
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"nginx": {
							Image: Union[*string, ImageLocationOrBuild]{
								Advanced: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("backend/Dockerfile"),
									},
								},
							},
						},
					},
				},
			},
			// Dockerfile paths are resolved relative to the context directory;
			// the build context defaults to the Dockerfile's directory.
			wantedBuildArgs: map[string]*DockerBuildArgs{
				"mock-svc": {
					Dockerfile: aws.String(filepath.Join(mockContextDir, "web/Dockerfile")),
					Context:    aws.String(filepath.Join(mockContextDir, filepath.Dir("web/Dockerfile"))),
				},
				"nginx": {
					Dockerfile: aws.String(filepath.Join(mockContextDir, "backend/Dockerfile")),
					Context:    aws.String(filepath.Join(mockContextDir, filepath.Dir("backend/Dockerfile"))),
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got, gotErr := tc.in.BuildArgs(mockContextDir)
			// THEN
			// Branch on the EXPECTED error, not the actual one: the previous
			// version dereferenced tc.wantedErr on any unexpected error (panic)
			// and silently skipped the error check when an expected error was
			// not returned.
			if tc.wantedErr != nil {
				require.EqualError(t, gotErr, tc.wantedErr.Error())
				return
			}
			require.NoError(t, gotErr)
			require.Equal(t, tc.wantedBuildArgs, got)
		})
	}
}
func TestNetworkLoadBalancerConfiguration_NLBListeners(t *testing.T) {
testCases := map[string]struct {
in NetworkLoadBalancerConfiguration
wanted []NetworkLoadBalancerListener
}{
"return empty list if there are no Listeners provided": {},
"return non empty list if main listener is provided": {
in: NetworkLoadBalancerConfiguration{
Listener: NetworkLoadBalancerListener{
Port: aws.String("8080/tcp"),
TargetContainer: stringP("main"),
},
},
wanted: []NetworkLoadBalancerListener{
{
Port: stringP("8080/tcp"),
TargetContainer: stringP("main"),
},
},
},
"return non empty list if main listener as well as AdditionalListeners are provided": {
in: NetworkLoadBalancerConfiguration{
Listener: NetworkLoadBalancerListener{
Port: aws.String("8080/tcp"),
TargetContainer: stringP("main"),
},
AdditionalListeners: []NetworkLoadBalancerListener{
{
Port: aws.String("8081/tcp"),
TargetContainer: stringP("main"),
},
{
Port: aws.String("8082/tcp"),
TargetContainer: stringP("main"),
},
},
},
wanted: []NetworkLoadBalancerListener{
{
Port: stringP("8080/tcp"),
TargetContainer: stringP("main"),
},
{
Port: stringP("8081/tcp"),
TargetContainer: stringP("main"),
},
{
Port: stringP("8082/tcp"),
TargetContainer: stringP("main"),
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// WHEN
got := tc.in.NLBListeners()
// THEN
require.Equal(t, tc.wanted, got)
})
}
}
| 2,894 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
)
// subnetIDsGetter wraps the EC2 call used to look up subnet IDs by filter,
// so tests can substitute a mock for the real client.
type subnetIDsGetter interface {
	SubnetIDs(filters ...ec2.Filter) ([]string, error)
}

// loader populates one dynamically resolved portion of a manifest.
type loader interface {
	load() error
}
// DynamicWorkloadManifest represents a dynamically populated workload manifest.
type DynamicWorkloadManifest struct {
	mft workloadManifest // The statically parsed manifest being populated.
	// Clients required to dynamically populate.
	// newSubnetIDsGetter is a factory so tests can inject a mock EC2 client.
	newSubnetIDsGetter func(*session.Session) subnetIDsGetter
}
// newDynamicWorkloadManifest wraps a parsed workload manifest so that its
// runtime-resolved fields can be populated later via Load. By default, subnet
// lookups go through a real EC2 client built from the provided session.
func newDynamicWorkloadManifest(mft workloadManifest) *DynamicWorkloadManifest {
	return &DynamicWorkloadManifest{
		mft: mft,
		newSubnetIDsGetter: func(s *session.Session) subnetIDsGetter {
			return ec2.New(s)
		},
	}
}
// Manifest returns the manifest content.
// The returned value is the wrapped workloadManifest.
func (s *DynamicWorkloadManifest) Manifest() any {
	return s.mft
}
// ApplyEnv returns the workload manifest with environment overrides.
// If the environment passed in does not have any overrides then it returns itself.
// Note the value receiver: the override is applied to a copy, so the receiver
// the caller holds is never mutated.
func (s DynamicWorkloadManifest) ApplyEnv(envName string) (DynamicWorkload, error) {
	mft, err := s.mft.applyEnv(envName)
	if err != nil {
		return nil, err
	}
	s.mft = mft
	return &s, nil
}
// RequiredEnvironmentFeatures returns environment features that are required for this manifest.
// It delegates to the wrapped manifest's requiredEnvironmentFeatures.
func (s *DynamicWorkloadManifest) RequiredEnvironmentFeatures() []string {
	return s.mft.requiredEnvironmentFeatures()
}
// Load dynamically populates all fields in the manifest.
// sess is used to build the AWS clients that resolve runtime values.
func (s *DynamicWorkloadManifest) Load(sess *session.Session) error {
	loaders := []loader{
		// Resolve subnet placement (e.g. subnets referenced by tags) to concrete IDs.
		&dynamicSubnets{
			cfg:    s.mft.subnets(),
			client: s.newSubnetIDsGetter(sess),
		},
	}
	return loadAll(loaders)
}
// loadAll runs each loader in order, stopping at and returning the first
// failure; it returns nil when every loader succeeds.
func loadAll(loaders []loader) error {
	var err error
	for _, l := range loaders {
		if err = l.load(); err != nil {
			break
		}
	}
	return err
}
| 76 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/manifest/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
// dynamicManifestMock bundles the mocked clients that tests inject into a
// DynamicWorkloadManifest.
type dynamicManifestMock struct {
	mockSubnetGetter *mocks.MocksubnetIDsGetter
}
// newMockMftWithTags returns a default backend service manifest whose network
// placement selects subnets by the tag "foo" with value "bar".
func newMockMftWithTags() workloadManifest {
	mockMftWithTags := newDefaultBackendService()
	mockMftWithTags.Network.VPC.Placement.Subnets.FromTags = Tags{"foo": StringSliceOrString{
		String: aws.String("bar"),
	}}
	return mockMftWithTags
}
// TestDynamicWorkloadManifest_Load verifies that Load resolves subnets-by-tags
// through the injected subnet getter, propagates lookup errors, and performs no
// lookup when the manifest selects no subnets by tags.
func TestDynamicWorkloadManifest_Load(t *testing.T) {
	mockMft := newDefaultBackendService()
	testCases := map[string]struct {
		inMft      workloadManifest
		setupMocks func(m dynamicManifestMock)

		wantedSubnetIDs []string
		wantedError     error
	}{
		"error if fail to get subnet IDs from tags": {
			inMft: newMockMftWithTags(),
			setupMocks: func(m dynamicManifestMock) {
				m.mockSubnetGetter.EXPECT().SubnetIDs(gomock.Any()).Return(nil, errors.New("some error"))
			},
			wantedError: fmt.Errorf("get subnet IDs: some error"),
		},
		"success with subnet IDs from tags": {
			inMft: newMockMftWithTags(),
			setupMocks: func(m dynamicManifestMock) {
				m.mockSubnetGetter.EXPECT().SubnetIDs(ec2.FilterForTags("foo", "bar")).Return([]string{"id1", "id2"}, nil)
			},
			wantedSubnetIDs: []string{"id1", "id2"},
		},
		"success with no subnets": {
			// No tag-based placement: the subnet getter must not be called.
			inMft:           mockMft,
			setupMocks:      func(m dynamicManifestMock) {},
			wantedSubnetIDs: []string{},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()

			m := dynamicManifestMock{
				mockSubnetGetter: mocks.NewMocksubnetIDsGetter(ctrl),
			}
			tc.setupMocks(m)

			// Inject the mocked subnet getter in place of the real EC2 client.
			dyn := &DynamicWorkloadManifest{
				mft: tc.inMft,
				newSubnetIDsGetter: func(s *session.Session) subnetIDsGetter {
					return m.mockSubnetGetter
				},
			}
			err := dyn.Load(nil)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.ElementsMatch(t, tc.wantedSubnetIDs, dyn.mft.subnets().IDs)
			}
		})
	}
}
| 93 |
copilot-cli | aws | Go | //go:build localintegration
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
// TestLoadBalancedWebService_InitialManifestIntegration asserts that a freshly
// initialized Load Balanced Web Service manifest serializes byte-for-byte to
// the golden files under testdata/.
func TestLoadBalancedWebService_InitialManifestIntegration(t *testing.T) {
	testCases := map[string]struct {
		inProps LoadBalancedWebServiceProps

		wantedTestdata string
	}{
		"default": {
			inProps: LoadBalancedWebServiceProps{
				WorkloadProps: &WorkloadProps{
					Name:       "frontend",
					Dockerfile: "./frontend/Dockerfile",
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs:   PlatformArgs{},
				},
				Port: 80,
			},
			wantedTestdata: "lb-svc.yml",
		},
		"with placement private": {
			inProps: LoadBalancedWebServiceProps{
				WorkloadProps: &WorkloadProps{
					Name:       "frontend",
					Dockerfile: "./frontend/Dockerfile",
					PrivateOnlyEnvironments: []string{
						"phonetool",
					},
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs:   PlatformArgs{},
				},
				Port: 80,
			},
			wantedTestdata: "lb-svc-placement-private.yml",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN the expected golden file content.
			path := filepath.Join("testdata", tc.wantedTestdata)
			wantedBytes, err := os.ReadFile(path)
			require.NoError(t, err)
			manifest := NewLoadBalancedWebService(&tc.inProps)

			// WHEN the manifest is serialized.
			tpl, err := manifest.MarshalBinary()
			require.NoError(t, err)

			// THEN it matches the golden file exactly.
			require.Equal(t, string(wantedBytes), string(tpl))
		})
	}
}
// TestBackendSvc_InitialManifestIntegration asserts that a freshly initialized
// Backend Service manifest serializes byte-for-byte to the golden files under
// testdata/.
func TestBackendSvc_InitialManifestIntegration(t *testing.T) {
	testCases := map[string]struct {
		inProps BackendServiceProps

		wantedTestdata string
	}{
		"without healthcheck and port and with private only environments": {
			inProps: BackendServiceProps{
				WorkloadProps: WorkloadProps{
					Name:       "subscribers",
					Dockerfile: "./subscribers/Dockerfile",
					PrivateOnlyEnvironments: []string{
						"phonetool",
					},
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{
						OSFamily: nil,
						Arch:     nil,
					},
				},
			},
			wantedTestdata: "backend-svc-nohealthcheck-placement.yml",
		},
		"with custom healthcheck command": {
			inProps: BackendServiceProps{
				WorkloadProps: WorkloadProps{
					Name:  "subscribers",
					Image: "flask-sample",
				},
				HealthCheck: ContainerHealthCheck{
					Command:     []string{"CMD-SHELL", "curl -f http://localhost:8080 || exit 1"},
					Interval:    durationp(6 * time.Second),
					Retries:     aws.Int(0),
					Timeout:     durationp(20 * time.Second),
					StartPeriod: durationp(15 * time.Second),
				},
				Port: 8080,
			},
			wantedTestdata: "backend-svc-customhealthcheck.yml",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN the expected golden file content.
			path := filepath.Join("testdata", tc.wantedTestdata)
			wantedBytes, err := os.ReadFile(path)
			require.NoError(t, err)
			manifest := NewBackendService(tc.inProps)

			// WHEN the manifest is serialized.
			tpl, err := manifest.MarshalBinary()
			require.NoError(t, err)

			// THEN it matches the golden file exactly.
			require.Equal(t, string(wantedBytes), string(tpl))
		})
	}
}
// TestWorkerSvc_InitialManifestIntegration is a golden-file test: it renders the
// initial manifest produced by NewWorkerService and compares the marshaled YAML
// byte-for-byte against the fixture named by wantedTestdata under testdata/.
func TestWorkerSvc_InitialManifestIntegration(t *testing.T) {
	testCases := map[string]struct {
		inProps        WorkerServiceProps // input properties for the new manifest.
		wantedTestdata string             // golden file name under testdata/.
	}{
		"without subscribe and with private only environments": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name: "testers",
					Dockerfile: "./testers/Dockerfile",
					PrivateOnlyEnvironments: []string{
						"phonetool",
					},
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{
						OSFamily: nil,
						Arch: nil,
					},
				},
			},
			wantedTestdata: "worker-svc-nosubscribe-placement.yml",
		},
		"with subscribe": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name: "testers",
					Dockerfile: "./testers/Dockerfile",
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{
						OSFamily: nil,
						Arch: nil,
					},
				},
				// Standard (non-FIFO) SNS topic subscriptions.
				Topics: []TopicSubscription{
					{
						Name: aws.String("testTopic"),
						Service: aws.String("service4TestTopic"),
					},
					{
						Name: aws.String("testTopic2"),
						Service: aws.String("service4TestTopic2"),
					},
				},
			},
			wantedTestdata: "worker-svc-subscribe.yml",
		},
		"with fifo topic subscription with default fifo queue": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name: "testers",
					Dockerfile: "./testers/Dockerfile",
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{
						OSFamily: nil,
						Arch: nil,
					},
				},
				// The ".fifo" suffix drives the default FIFO queue rendering.
				Topics: []TopicSubscription{
					{
						Name: aws.String("testTopic.fifo"),
						Service: aws.String("service4TestTopic"),
					},
				},
			},
			wantedTestdata: "worker-svc-with-default-fifo-queue.yml",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			path := filepath.Join("testdata", tc.wantedTestdata)
			wantedBytes, err := os.ReadFile(path)
			require.NoError(t, err)
			manifest := NewWorkerService(tc.inProps)
			// WHEN
			tpl, err := manifest.MarshalBinary()
			require.NoError(t, err)
			// THEN
			require.Equal(t, string(wantedBytes), string(tpl))
		})
	}
}
// TestScheduledJob_InitialManifestIntegration is a golden-file test: it renders
// the initial manifest produced by NewScheduledJob and compares the marshaled
// YAML byte-for-byte against the fixture named by wantedTestData under testdata/.
func TestScheduledJob_InitialManifestIntegration(t *testing.T) {
	testCases := map[string]struct {
		inProps        ScheduledJobProps // input properties for the new manifest.
		wantedTestData string            // golden file name under testdata/.
	}{
		"without timeout or retries": {
			inProps: ScheduledJobProps{
				WorkloadProps: &WorkloadProps{
					Name: "cuteness-aggregator",
					Image: "copilot/cuteness-aggregator",
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{},
				},
				Schedule: "@weekly",
			},
			wantedTestData: "scheduled-job-no-timeout-or-retries.yml",
		},
		"fully specified using cron schedule with placement set to private": {
			inProps: ScheduledJobProps{
				WorkloadProps: &WorkloadProps{
					Name: "cuteness-aggregator",
					Dockerfile: "./cuteness-aggregator/Dockerfile",
					PrivateOnlyEnvironments: []string{
						"phonetool",
					},
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{},
				},
				// Cron expression instead of a predefined @-schedule.
				Schedule: "0 */2 * * *",
				Retries: 3,
				Timeout: "1h30m",
			},
			wantedTestData: "scheduled-job-fully-specified-placement.yml",
		},
		"with timeout and no retries": {
			inProps: ScheduledJobProps{
				WorkloadProps: &WorkloadProps{
					Name: "cuteness-aggregator",
					Dockerfile: "./cuteness-aggregator/Dockerfile",
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{},
				},
				Schedule: "@every 5h",
				Retries: 0,
				Timeout: "3h",
			},
			wantedTestData: "scheduled-job-no-retries.yml",
		},
		"with retries and no timeout": {
			inProps: ScheduledJobProps{
				WorkloadProps: &WorkloadProps{
					Name: "cuteness-aggregator",
					Dockerfile: "./cuteness-aggregator/Dockerfile",
				},
				Platform: PlatformArgsOrString{
					PlatformString: nil,
					PlatformArgs: PlatformArgs{},
				},
				Schedule: "@every 5h",
				Retries: 5,
			},
			wantedTestData: "scheduled-job-no-timeout.yml",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			path := filepath.Join("testdata", tc.wantedTestData)
			wantedBytes, err := os.ReadFile(path)
			require.NoError(t, err)
			manifest := NewScheduledJob(&tc.inProps)
			// WHEN
			tpl, err := manifest.MarshalBinary()
			require.NoError(t, err)
			// THEN
			require.Equal(t, string(wantedBytes), string(tpl))
		})
	}
}
// TestEnvironment_InitialManifestIntegration is a golden-file test: it renders
// the initial manifest produced by NewEnvironment and compares the marshaled
// YAML byte-for-byte against the fixture named by wantedTestData under testdata/.
func TestEnvironment_InitialManifestIntegration(t *testing.T) {
	testCases := map[string]struct {
		inProps        EnvironmentProps // input properties for the new manifest.
		wantedTestData string           // golden file name under testdata/.
	}{
		"fully configured with customized vpc resources": {
			inProps: EnvironmentProps{
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					// Copilot-managed VPC with adjusted CIDRs/AZs.
					VPCConfig: &config.AdjustVPC{
						CIDR: "mock-cidr-0",
						AZs: []string{"mock-az-1", "mock-az-2"},
						PublicSubnetCIDRs: []string{"mock-cidr-1", "mock-cidr-2"},
						PrivateSubnetCIDRs: []string{"mock-cidr-3", "mock-cidr-4"},
					},
					ImportCertARNs: []string{"mock-cert-1", "mock-cert-2"},
					InternalALBSubnets: []string{"mock-subnet-id-3", "mock-subnet-id-4"},
				},
				Telemetry: &config.Telemetry{
					EnableContainerInsights: false,
				},
			},
			wantedTestData: "environment-adjust-vpc.yml",
		},
		"fully configured with customized vpc resources including imported private subnets": {
			inProps: EnvironmentProps{
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					// Pre-existing VPC with only private subnets imported.
					ImportVPC: &config.ImportVPC{
						ID: "mock-vpc-id",
						PrivateSubnetIDs: []string{"mock-subnet-id-3", "mock-subnet-id-4"},
					},
					ImportCertARNs: []string{"mock-cert-1", "mock-cert-2"},
					InternalALBSubnets: []string{"mock-subnet-id-3", "mock-subnet-id-4"},
					EnableInternalALBVPCIngress: false,
				},
				Telemetry: &config.Telemetry{
					EnableContainerInsights: false,
				},
			},
			wantedTestData: "environment-adjust-vpc-private-subnets.yml",
		},
		"fully configured with imported vpc resources": {
			inProps: EnvironmentProps{
				Name: "test",
				CustomConfig: &config.CustomizeEnv{
					ImportVPC: &config.ImportVPC{
						ID: "mock-vpc-id",
						PublicSubnetIDs: []string{"mock-subnet-id-1", "mock-subnet-id-2"},
						PrivateSubnetIDs: []string{"mock-subnet-id-3", "mock-subnet-id-4"},
					},
					ImportCertARNs: []string{"mock-cert-1", "mock-cert-2"},
				},
				Telemetry: &config.Telemetry{
					EnableContainerInsights: true,
				},
			},
			wantedTestData: "environment-import-vpc.yml",
		},
		"basic manifest": {
			inProps: EnvironmentProps{
				Name: "test",
			},
			wantedTestData: "environment-default.yml",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			path := filepath.Join("testdata", tc.wantedTestData)
			wantedBytes, err := os.ReadFile(path)
			require.NoError(t, err)
			manifest := NewEnvironment(&tc.inProps)
			// WHEN
			tpl, err := manifest.MarshalBinary()
			require.NoError(t, err)
			// THEN
			require.Equal(t, string(wantedBytes), string(tpl))
		})
	}
}
// TestPipelineManifest_InitialManifest_Integration is a golden-file test: it
// builds a pipeline manifest via NewPipeline and compares the marshaled YAML
// byte-for-byte against the fixture named by wantedTestData under testdata/.
func TestPipelineManifest_InitialManifest_Integration(t *testing.T) {
	// The previously declared wantedError field was never set by any case nor
	// checked by the runner, so it has been removed as dead code.
	testCases := map[string]struct {
		inProvider     Provider        // source provider for the pipeline.
		inStages       []PipelineStage // ordered deployment stages.
		wantedTestData string          // golden file name under testdata/.
	}{
		"basic pipeline manifest": {
			inProvider: &githubProvider{
				properties: &GitHubProperties{
					RepositoryURL: "mock-url",
					Branch: "main",
				},
			},
			inStages: []PipelineStage{
				{
					Name: "test",
				},
				{
					Name: "prod",
				},
			},
			wantedTestData: "pipeline-basic.yml",
		},
		"environment pipeline manifest with template configurations": {
			inProvider: &githubProvider{
				properties: &GitHubProperties{
					RepositoryURL: "mock-url",
					Branch: "main",
				},
			},
			inStages: []PipelineStage{
				{
					Name: "test",
					Deployments: Deployments{
						"deploy-env": &Deployment{
							TemplatePath: "infrastructure/test.env.yml",
							TemplateConfig: "infrastructure/test.env.params.json",
							StackName: "app-test",
						},
					},
				},
				{
					Name: "prod",
					Deployments: Deployments{
						"deploy-env": &Deployment{
							TemplatePath: "infrastructure/prod.env.yml",
							TemplateConfig: "infrastructure/prod.env.params.json",
							StackName: "app-prod",
						},
					},
				},
			},
			wantedTestData: "pipeline-environment.yml",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			// NOTE(review): no mocks are created from this controller; it is kept
			// so the file's gomock import stays in use — confirm before removing.
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			path := filepath.Join("testdata", tc.wantedTestData)
			wantedBytes, err := os.ReadFile(path)
			require.NoError(t, err)
			manifest, err := NewPipeline("mock-pipeline", tc.inProvider, tc.inStages)
			require.NoError(t, err)
			// WHEN
			b, err := manifest.MarshalBinary()
			// THEN
			require.Equal(t, string(wantedBytes), string(b))
			require.NoError(t, err)
		})
	}
}
| 486 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/fatih/structs"
"gopkg.in/yaml.v3"
)
// Valid source providers for Copilot Pipelines.
const (
	GithubProviderName = "GitHub"
	GithubV1ProviderName = "GitHubV1"
	CodeCommitProviderName = "CodeCommit"
	BitbucketProviderName = "Bitbucket"
)
// pipelineManifestPath is the path of the embedded template used to render a
// new pipeline manifest (see Pipeline.MarshalBinary).
const pipelineManifestPath = "cicd/pipeline.yml"
// PipelineProviders is the list of all available source integrations.
// NOTE(review): GithubV1ProviderName is intentionally absent here — presumably
// the v1 (access-token) flow is not offered for new pipelines; confirm.
var PipelineProviders = []string{
	GithubProviderName,
	CodeCommitProviderName,
	BitbucketProviderName,
}
// Provider defines a source of the artifacts
// that will be built and deployed via a pipeline.
// Name identifies the provider kind; String is its display form;
// Properties returns the provider-specific configuration as a map.
type Provider interface {
	fmt.Stringer
	Name() string
	Properties() map[string]interface{}
}
// githubV1Provider is the GitHub (v1, access-token based) source provider.
type githubV1Provider struct {
	properties *GitHubV1Properties
}
// Name returns the provider kind identifier.
func (p *githubV1Provider) Name() string {
	return GithubV1ProviderName
}
// String returns the display name. Note it returns GithubProviderName ("GitHub"),
// not the v1 identifier, so both GitHub providers display identically.
func (p *githubV1Provider) String() string {
	return GithubProviderName
}
// Properties converts the provider configuration to a map keyed by the
// `structs` field tags.
func (p *githubV1Provider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// githubProvider is the GitHub (v2, CodeStar-connection based) source provider.
type githubProvider struct {
	properties *GitHubProperties
}
// Name returns the provider kind identifier.
func (p *githubProvider) Name() string {
	return GithubProviderName
}
// String returns the display name.
func (p *githubProvider) String() string {
	return GithubProviderName
}
// Properties converts the provider configuration to a map keyed by the
// `structs` field tags.
func (p *githubProvider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// codecommitProvider is the AWS CodeCommit source provider.
type codecommitProvider struct {
	properties *CodeCommitProperties
}
// Name returns the provider kind identifier.
func (p *codecommitProvider) Name() string {
	return CodeCommitProviderName
}
// String returns the display name.
func (p *codecommitProvider) String() string {
	return CodeCommitProviderName
}
// Properties converts the provider configuration to a map keyed by the
// `structs` field tags.
func (p *codecommitProvider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// bitbucketProvider is the Bitbucket source provider.
type bitbucketProvider struct {
	properties *BitbucketProperties
}
// Name returns the provider kind identifier.
func (p *bitbucketProvider) Name() string {
	return BitbucketProviderName
}
// String returns the display name.
func (p *bitbucketProvider) String() string {
	return BitbucketProviderName
}
// Properties converts the provider configuration to a map keyed by the
// `structs` field tags.
func (p *bitbucketProvider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// GitHubV1Properties contain information for configuring a Githubv1
// source provider.
type GitHubV1Properties struct {
	// use tag from https://godoc.org/github.com/fatih/structs#example-Map--Tags
	// to specify the name of the field in the output properties
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
	GithubSecretIdKeyName string `structs:"access_token_secret" yaml:"access_token_secret"`
}
// GitHubProperties contains information for configuring a GitHubv2
// source provider.
type GitHubProperties struct {
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
}
// BitbucketProperties contains information for configuring a Bitbucket
// source provider.
type BitbucketProperties struct {
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
}
// CodeCommitProperties contains information for configuring a CodeCommit
// source provider.
type CodeCommitProperties struct {
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
}
// NewProvider creates a source provider based on the type of
// the provided provider-specific configurations.
// An ErrUnknownProvider error is returned for unrecognized configuration types.
func NewProvider(configs interface{}) (Provider, error) {
	switch cfg := configs.(type) {
	case *GitHubV1Properties:
		return &githubV1Provider{properties: cfg}, nil
	case *GitHubProperties:
		return &githubProvider{properties: cfg}, nil
	case *CodeCommitProperties:
		return &codecommitProvider{properties: cfg}, nil
	case *BitbucketProperties:
		return &bitbucketProvider{properties: cfg}, nil
	default:
		return nil, &ErrUnknownProvider{unknownProviderProperties: cfg}
	}
}
// PipelineSchemaMajorVersion is the major version number
// of the pipeline manifest schema.
type PipelineSchemaMajorVersion int
const (
	// Ver1 is the current schema major version of the pipelines/*/manifest.yml file.
	Ver1 PipelineSchemaMajorVersion = iota + 1
)
// Pipeline contains information that defines the relationship
// and deployment ordering of your environments.
type Pipeline struct {
	// Name of the pipeline
	Name string `yaml:"name"`
	Version PipelineSchemaMajorVersion `yaml:"version"`
	Source *Source `yaml:"source"`
	Build *Build `yaml:"build"`
	Stages []PipelineStage `yaml:"stages"`
	parser template.Parser // template engine used by MarshalBinary; not serialized.
}
// Source defines the source of the artifacts to be built and deployed.
type Source struct {
	ProviderName string `yaml:"provider"`
	Properties map[string]interface{} `yaml:"properties"`
}
// Build defines the build project to build and test image.
type Build struct {
	Image string `yaml:"image"`
	Buildspec string `yaml:"buildspec,omitempty"`
	// AdditionalPolicy is an extra IAM policy document attached to the build project.
	AdditionalPolicy struct {
		Document yaml.Node `yaml:"PolicyDocument,omitempty"`
	} `yaml:"additional_policy,omitempty"`
}
// PipelineStage represents a stage in the pipeline manifest
type PipelineStage struct {
	Name string `yaml:"name"`
	RequiresApproval bool `yaml:"requires_approval,omitempty"`
	TestCommands []string `yaml:"test_commands,omitempty"`
	Deployments Deployments `yaml:"deployments,omitempty"`
}
// Deployments represent a directed graph of cloudformation deployments.
// Keys are deployment names; DependsOn edges reference other keys.
type Deployments map[string]*Deployment
// Deployment is a cloudformation stack deployment configuration.
type Deployment struct {
	StackName string `yaml:"stack_name"`
	TemplatePath string `yaml:"template_path"`
	TemplateConfig string `yaml:"template_config"`
	DependsOn []string `yaml:"depends_on"`
}
// NewPipeline returns a pipeline manifest object for the given provider and
// stages. An error is returned when no deployment stage is provided.
func NewPipeline(pipelineName string, provider Provider, stages []PipelineStage) (*Pipeline, error) {
	// TODO: #221 Do more validations
	if len(stages) == 0 {
		return nil, fmt.Errorf("a pipeline %s can not be created without a deployment stage", pipelineName)
	}
	src := &Source{
		ProviderName: provider.Name(),
		Properties:   provider.Properties(),
	}
	return &Pipeline{
		Name:    pipelineName,
		Version: Ver1,
		Source:  src,
		Stages:  stages,
		parser:  template.New(),
	}, nil
}
// MarshalBinary serializes the pipeline manifest object into byte array that
// represents the pipeline.yml document.
// The rendering is driven by the template stored at pipelineManifestPath.
func (m *Pipeline) MarshalBinary() ([]byte, error) {
	// The manifest is passed by value as the template data.
	content, err := m.parser.Parse(pipelineManifestPath, *m)
	if err != nil {
		return nil, err
	}
	return content.Bytes(), nil
}
// UnmarshalPipeline deserializes the YAML input stream into a pipeline
// manifest object. It returns an error if any issue occurs during
// deserialization or the YAML input contains invalid fields.
func UnmarshalPipeline(in []byte) (*Pipeline, error) {
	pm := Pipeline{}
	if err := yaml.Unmarshal(in, &pm); err != nil {
		return nil, err
	}
	version, err := validateVersion(&pm)
	if err != nil {
		return nil, err
	}
	switch version {
	case Ver1:
		return &pm, nil
	default:
		// Defensive: validateVersion only returns a known version on success,
		// so this branch is unreachable unless a new schema version is added
		// without updating this switch.
		return nil, errors.New("unexpected error occurs while unmarshalling manifest.yml")
	}
}
// IsCodeStarConnection indicates to the manifest if this source requires a CSC connection.
// Only GitHub (v2) and Bitbucket sources are CodeStar-connection based.
func (s Source) IsCodeStarConnection() bool {
	switch s.ProviderName {
	case GithubProviderName, BitbucketProviderName:
		return true
	default:
		return false
	}
}
// validateVersion returns the manifest's schema version when it is supported,
// or an ErrInvalidPipelineManifestVersion error otherwise.
func validateVersion(pm *Pipeline) (PipelineSchemaMajorVersion, error) {
	if pm.Version == Ver1 {
		return Ver1, nil
	}
	return pm.Version, &ErrInvalidPipelineManifestVersion{
		invalidVersion: pm.Version,
	}
}
| 284 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"errors"
"fmt"
"testing"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/aws/copilot-cli/internal/pkg/template/mocks"
"github.com/fatih/structs"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// Default branch names used by the provider test fixtures.
const (
	defaultGHBranch = "main"
	defaultCCBranch = "main"
)
// TestNewProvider verifies that NewProvider constructs a provider for each
// supported property type without error.
func TestNewProvider(t *testing.T) {
	testCases := map[string]struct {
		providerConfig interface{} // provider-specific properties passed to NewProvider.
		expectedErr error          // nil means construction must succeed.
	}{
		"successfully create GitHub provider": {
			providerConfig: &GitHubProperties{
				RepositoryURL: "aws/amazon-ecs-cli-v2",
				Branch: defaultGHBranch,
			},
		},
		"successfully create CodeCommit provider": {
			providerConfig: &CodeCommitProperties{
				RepositoryURL: "https://us-west-2.console.aws.amazon.com/codesuite/codecommit/repositories/wings/browse",
				Branch: defaultCCBranch,
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			_, err := NewProvider(tc.providerConfig)
			if tc.expectedErr != nil {
				require.EqualError(t, err, tc.expectedErr.Error())
			} else {
				require.NoError(t, err, "unexpected error while calling NewProvider()")
			}
		})
	}
}
// TestNewPipelineManifest verifies NewPipeline: it must reject an empty stage
// list and otherwise produce a manifest whose YAML form matches expectations.
func TestNewPipelineManifest(t *testing.T) {
	const pipelineName = "pipepiper"
	// The previously declared beforeEach field was never invoked by any case
	// or by the runner, so it has been removed as dead code.
	testCases := map[string]struct {
		provider         Provider        // source provider for the pipeline.
		inputStages      []PipelineStage // stages passed to NewPipeline.
		expectedManifest *Pipeline       // expected result when no error.
		expectedErr      error           // expected error, if any.
	}{
		"errors out when no stage provided": {
			provider: func() Provider {
				p, err := NewProvider(&GitHubProperties{
					RepositoryURL: "aws/amazon-ecs-cli-v2",
					Branch: defaultGHBranch,
				})
				require.NoError(t, err, "failed to create provider")
				return p
			}(),
			expectedErr: fmt.Errorf("a pipeline %s can not be created without a deployment stage",
				pipelineName),
		},
		"happy case with non-default stages": {
			provider: func() Provider {
				p, err := NewProvider(&GitHubProperties{
					RepositoryURL: "aws/amazon-ecs-cli-v2",
					Branch: defaultGHBranch,
				})
				require.NoError(t, err, "failed to create provider")
				return p
			}(),
			inputStages: []PipelineStage{
				{
					Name: "chicken",
					RequiresApproval: false,
				},
				{
					Name: "wings",
					RequiresApproval: true,
				},
			},
			expectedManifest: &Pipeline{
				Name: "pipepiper",
				Version: Ver1,
				Source: &Source{
					ProviderName: "GitHub",
					Properties: structs.Map(GitHubProperties{
						RepositoryURL: "aws/amazon-ecs-cli-v2",
						Branch: defaultGHBranch,
					}),
				},
				Stages: []PipelineStage{
					{
						Name: "chicken",
						RequiresApproval: false,
					},
					{
						Name: "wings",
						RequiresApproval: true,
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			expectedBytes, err := yaml.Marshal(tc.expectedManifest)
			require.NoError(t, err)
			// WHEN
			m, err := NewPipeline(pipelineName, tc.provider, tc.inputStages)
			// THEN
			if tc.expectedErr != nil {
				require.EqualError(t, err, tc.expectedErr.Error())
			} else {
				// Compare via YAML so the unexported parser field is ignored.
				actualBytes, err := yaml.Marshal(m)
				require.NoError(t, err)
				require.Equal(t, expectedBytes, actualBytes, "the manifest is different from the expected")
			}
		})
	}
}
// TestPipelineManifest_MarshalBinary verifies that MarshalBinary propagates
// template-parsing errors and returns the rendered bytes on success, using a
// mocked template.Parser.
func TestPipelineManifest_MarshalBinary(t *testing.T) {
	testCases := map[string]struct {
		mockDependencies func(ctrl *gomock.Controller, manifest *Pipeline) // installs the mocked parser.
		wantedBinary []byte
		wantedError error
	}{
		"error parsing template": {
			mockDependencies: func(ctrl *gomock.Controller, manifest *Pipeline) {
				m := mocks.NewMockParser(ctrl)
				manifest.parser = m
				m.EXPECT().Parse(pipelineManifestPath, *manifest).Return(nil, errors.New("some error"))
			},
			wantedError: errors.New("some error"),
		},
		"returns rendered content": {
			mockDependencies: func(ctrl *gomock.Controller, manifest *Pipeline) {
				m := mocks.NewMockParser(ctrl)
				manifest.parser = m
				m.EXPECT().Parse(pipelineManifestPath, *manifest).Return(&template.Content{Buffer: bytes.NewBufferString("hello")}, nil)
			},
			wantedBinary: []byte("hello"),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			manifest := &Pipeline{}
			tc.mockDependencies(ctrl, manifest)
			// WHEN
			b, err := manifest.MarshalBinary()
			// THEN
			require.Equal(t, tc.wantedError, err)
			require.Equal(t, tc.wantedBinary, b)
		})
	}
}
// TestUnmarshalPipeline verifies UnmarshalPipeline against raw YAML inputs:
// invalid schema versions and corrupted YAML must error; valid documents must
// deserialize into the expected Pipeline value.
func TestUnmarshalPipeline(t *testing.T) {
	testCases := map[string]struct {
		inContent string    // raw pipeline.yml content.
		expectedManifest *Pipeline
		expectedErr error
	}{
		"invalid pipeline schema version": {
			inContent: `
name: pipepiper
version: -1

source:
  provider: GitHub
  properties:
    repository: aws/somethingCool
    branch: main

stages:
    -
      name: test
    -
      name: prod
`,
			expectedErr: &ErrInvalidPipelineManifestVersion{
				invalidVersion: PipelineSchemaMajorVersion(-1),
			},
		},
		"invalid pipeline.yml": {
			inContent: `corrupted yaml`,
			expectedErr: errors.New("yaml: unmarshal errors:\n  line 1: cannot unmarshal !!str `corrupt...` into manifest.Pipeline"),
		},
		"valid pipeline.yml without build": {
			inContent: `
name: pipepiper
version: 1

source:
  provider: GitHub
  properties:
    repository: aws/somethingCool
    access_token_secret: "github-token-badgoose-backend"
    branch: main

stages:
    -
      name: chicken
      test_commands: []
    -
      name: wings
      test_commands: []
`,
			expectedManifest: &Pipeline{
				Name: "pipepiper",
				Version: Ver1,
				Source: &Source{
					ProviderName: "GitHub",
					Properties: map[string]interface{}{
						"access_token_secret": "github-token-badgoose-backend",
						"repository": "aws/somethingCool",
						"branch": defaultGHBranch,
					},
				},
				Stages: []PipelineStage{
					{
						Name: "chicken",
						TestCommands: []string{},
					},
					{
						Name: "wings",
						TestCommands: []string{},
					},
				},
			},
		},
		"valid pipeline.yml with build": {
			inContent: `
name: pipepiper
version: 1

source:
  provider: GitHub
  properties:
    repository: aws/somethingCool
    access_token_secret: "github-token-badgoose-backend"
    branch: main

build:
  image: aws/codebuild/standard:3.0

stages:
    -
      name: chicken
      test_commands: []
`,
			expectedManifest: &Pipeline{
				Name: "pipepiper",
				Version: Ver1,
				Source: &Source{
					ProviderName: "GitHub",
					Properties: map[string]interface{}{
						"access_token_secret": "github-token-badgoose-backend",
						"repository": "aws/somethingCool",
						"branch": defaultGHBranch,
					},
				},
				Build: &Build{
					Image: "aws/codebuild/standard:3.0",
				},
				Stages: []PipelineStage{
					{
						Name: "chicken",
						TestCommands: []string{},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			m, err := UnmarshalPipeline([]byte(tc.inContent))
			if tc.expectedErr != nil {
				require.EqualError(t, err, tc.expectedErr.Error())
			} else {
				require.Equal(t, tc.expectedManifest, m)
			}
		})
	}
}
| 320 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
)
const (
	// requestDrivenWebSvcManifestPath is the path of the embedded template used
	// to render a new Request-Driven Web Service manifest.
	requestDrivenWebSvcManifestPath string = "workloads/services/rd-web/manifest.yml"
)
// RequestDrivenWebService holds the configuration to create a Request-Driven Web Service.
type RequestDrivenWebService struct {
	Workload `yaml:",inline"`
	RequestDrivenWebServiceConfig `yaml:",inline"`
	Environments map[string]*RequestDrivenWebServiceConfig `yaml:",flow"` // Fields to override per environment.
	parser template.Parser // template engine used by MarshalBinary; not serialized.
}
// subnets returns a pointer to the subnet placement configuration so callers
// can read or mutate it in place.
func (s *RequestDrivenWebService) subnets() *SubnetListOrArgs {
	return &s.Network.VPC.Placement.Subnets
}
// RequestDrivenWebServiceConfig holds the configuration that can be overridden per environments.
type RequestDrivenWebServiceConfig struct {
	RequestDrivenWebServiceHttpConfig `yaml:"http,flow"`
	InstanceConfig AppRunnerInstanceConfig `yaml:",inline"`
	ImageConfig ImageWithPort `yaml:"image"`
	Variables map[string]Variable `yaml:"variables"`
	Secrets map[string]Secret `yaml:"secrets"`
	StartCommand *string `yaml:"command"`
	Tags map[string]string `yaml:"tags"`
	PublishConfig PublishConfig `yaml:"publish"`
	Network RequestDrivenWebServiceNetworkConfig `yaml:"network"`
	Observability Observability `yaml:"observability"`
	Count *string `yaml:"count"`
}
// Observability holds configuration for observability to the service.
type Observability struct {
	Tracing *string `yaml:"tracing"`
}
// isEmpty returns true when no observability field is set.
func (o *Observability) isEmpty() bool {
	return o.Tracing == nil
}
// ImageWithPort represents a container image with an exposed port.
type ImageWithPort struct {
	Image Image `yaml:",inline"`
	Port *uint16 `yaml:"port"`
}
// RequestDrivenWebServiceNetworkConfig represents options for network connection to AWS resources for a Request-Driven Web Service.
type RequestDrivenWebServiceNetworkConfig struct {
	VPC rdwsVpcConfig `yaml:"vpc"`
}
// IsEmpty returns empty if the struct has all zero members.
func (c *RequestDrivenWebServiceNetworkConfig) IsEmpty() bool {
	return c.VPC.isEmpty()
}
// requiredEnvFeatures reports the environment features this network
// configuration depends on: private placement requires a NAT gateway.
func (c *RequestDrivenWebServiceNetworkConfig) requiredEnvFeatures() []string {
	if aws.StringValue((*string)(c.VPC.Placement.PlacementString)) == string(PrivateSubnetPlacement) {
		return []string{template.NATFeatureName}
	}
	return nil
}
// rdwsVpcConfig holds the VPC settings of a Request-Driven Web Service.
type rdwsVpcConfig struct {
	Placement PlacementArgOrString `yaml:"placement"`
}
// isEmpty returns true when no placement is configured.
func (c *rdwsVpcConfig) isEmpty() bool {
	return c.Placement.IsEmpty()
}
// RequestDrivenWebServiceHttpConfig represents options for configuring http.
type RequestDrivenWebServiceHttpConfig struct {
	HealthCheckConfiguration HealthCheckArgsOrString `yaml:"healthcheck"`
	Alias *string `yaml:"alias"`
	// Private is either a boolean toggle or a VPCEndpoint configuration.
	Private Union[*bool, VPCEndpoint] `yaml:"private"`
}
// VPCEndpoint is used to configure a pre-existing VPC endpoint.
type VPCEndpoint struct {
	Endpoint *string `yaml:"endpoint"`
}
// AppRunnerInstanceConfig contains the instance configuration properties for an App Runner service.
type AppRunnerInstanceConfig struct {
	CPU *int `yaml:"cpu"`
	Memory *int `yaml:"memory"`
	Platform PlatformArgsOrString `yaml:"platform,omitempty"`
}
// RequestDrivenWebServiceProps contains properties for creating a new request-driven web service manifest.
type RequestDrivenWebServiceProps struct {
	*WorkloadProps
	Port uint16
	Platform PlatformArgsOrString
	Private bool // When true, the service is reachable only from within the VPC.
}
// NewRequestDrivenWebService creates a new Request-Driven Web Service manifest
// with default values, applying the name, image/build source, port, platform
// and privacy settings from props.
func NewRequestDrivenWebService(props *RequestDrivenWebServiceProps) *RequestDrivenWebService {
	mft := newDefaultRequestDrivenWebService()
	mft.Name = aws.String(props.Name)
	cfg := &mft.RequestDrivenWebServiceConfig
	cfg.ImageConfig.Image.Location = stringP(props.Image)
	cfg.ImageConfig.Image.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
	cfg.ImageConfig.Port = aws.Uint16(props.Port)
	cfg.InstanceConfig.Platform = props.Platform
	if props.Private {
		// A private service is placed in the VPC's private subnets.
		mft.Private = BasicToUnion[*bool, VPCEndpoint](aws.Bool(true))
		mft.Network.VPC.Placement.PlacementString = (*PlacementString)(aws.String("private"))
	}
	mft.parser = template.New()
	return mft
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (s *RequestDrivenWebService) MarshalBinary() ([]byte, error) {
	// Render the embedded template with the manifest (by value) as data.
	content, err := s.parser.Parse(requestDrivenWebSvcManifestPath, *s)
	if err != nil {
		return nil, err
	}
	return content.Bytes(), nil
}
// Port returns the exposed port in the manifest.
// A RequestDrivenWebService always has a port exposed therefore the boolean is always true.
func (s *RequestDrivenWebService) Port() (port uint16, ok bool) {
	return aws.Uint16Value(s.ImageConfig.Port), true
}
// Publish returns the list of topics where notifications can be published.
func (s *RequestDrivenWebService) Publish() []Topic {
	return s.RequestDrivenWebServiceConfig.PublishConfig.publishedTopics()
}
// ContainerPlatform returns the platform for the service,
// defaulting to linux/amd64 when none is configured.
func (s *RequestDrivenWebService) ContainerPlatform() string {
	if !s.InstanceConfig.Platform.IsEmpty() {
		return platformString(s.InstanceConfig.Platform.OS(), s.InstanceConfig.Platform.Arch())
	}
	return platformString(OSLinux, ArchAMD64)
}
// BuildArgs returns a docker.BuildArguments object given a context directory.
func (s *RequestDrivenWebService) BuildArgs(contextDir string) (map[string]*DockerBuildArgs, error) {
	required, err := requiresBuild(s.ImageConfig.Image)
	if err != nil {
		return nil, err
	}
	// Create a map to store build args; only the main container image is
	// ever added here, hence the capacity of 1.
	buildArgsPerContainer := make(map[string]*DockerBuildArgs, 1)
	if required {
		buildArgsPerContainer[aws.StringValue(s.Name)] = s.ImageConfig.Image.BuildConfig(contextDir)
	}
	return buildArgsPerContainer, nil
}
// applyEnv returns a copy of the manifest with the configuration overrides of
// envName merged on top of the base configuration. The receiver is taken by
// value so the original manifest is never mutated.
func (s RequestDrivenWebService) applyEnv(envName string) (workloadManifest, error) {
	overrideConfig, ok := s.Environments[envName]
	if !ok {
		return &s, nil
	}
	// Apply overrides to the original service configuration.
	// Each transformer customizes how particular field types are merged.
	for _, t := range defaultTransformers {
		err := mergo.Merge(&s, RequestDrivenWebService{
			RequestDrivenWebServiceConfig: *overrideConfig,
		}, mergo.WithOverride, mergo.WithTransformers(t))
		if err != nil {
			return nil, err
		}
	}
	// Drop per-environment sections from the merged result.
	s.Environments = nil
	return &s, nil
}
// requiredEnvironmentFeatures returns the environment features this manifest
// depends on; currently these come only from the network configuration.
func (s *RequestDrivenWebService) requiredEnvironmentFeatures() []string {
	return append([]string(nil), s.Network.requiredEnvFeatures()...)
}
// newDefaultRequestDrivenWebService returns an empty RequestDrivenWebService with only the default values set.
func newDefaultRequestDrivenWebService() *RequestDrivenWebService {
	return &RequestDrivenWebService{
		Workload: Workload{
			Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
		},
		RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
			ImageConfig: ImageWithPort{},
			InstanceConfig: AppRunnerInstanceConfig{
				// Default instance size: 1024 CPU units and 2048 MB of memory.
				CPU: aws.Int(1024),
				Memory: aws.Int(2048),
			},
		},
	}
}
| 210 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"errors"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/aws/copilot-cli/internal/pkg/template/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestNewRequestDrivenWebService verifies that NewRequestDrivenWebService
// populates the manifest fields (name, type, image build config, port and
// default instance size) from the provided props.
func TestNewRequestDrivenWebService(t *testing.T) {
	testCases := map[string]struct {
		input *RequestDrivenWebServiceProps
		wantedStruct *RequestDrivenWebService
	}{
		"should return an instance of RequestDrivenWebService": {
			input: &RequestDrivenWebServiceProps{
				WorkloadProps: &WorkloadProps{
					Name: "frontend",
					Dockerfile: "./Dockerfile",
				},
				Port: uint16(80),
			},
			wantedStruct: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("frontend"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./Dockerfile"),
									},
								},
							},
						},
						Port: aws.Uint16(80),
					},
					// Default App Runner instance size.
					InstanceConfig: AppRunnerInstanceConfig{
						CPU: aws.Int(1024),
						Memory: aws.Int(2048),
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			svc := NewRequestDrivenWebService(tc.input)
			// Compare field-by-field; the unexported parser field is excluded.
			require.Equal(t, tc.wantedStruct.Name, svc.Name)
			require.Equal(t, tc.wantedStruct.Type, svc.Type)
			require.Equal(t, tc.wantedStruct.Environments, svc.Environments)
			require.Equal(t, tc.wantedStruct.InstanceConfig, svc.InstanceConfig)
			require.Equal(t, tc.wantedStruct.ImageConfig, svc.ImageConfig)
			require.Equal(t, tc.wantedStruct.Tags, svc.Tags)
			require.Equal(t, tc.wantedStruct.Variables, svc.Variables)
		})
	}
}
// TestRequestDrivenWebService_UnmarshalYaml exercises YAML unmarshaling of a
// Request-Driven Web Service manifest: image build vs. location, build args,
// variables, tags, and the http healthcheck in both map and shorthand form.
// NOTE(review): the YAML indentation inside the string fixtures was
// reconstructed at the conventional 2 spaces per level — confirm against the
// upstream file.
func TestRequestDrivenWebService_UnmarshalYaml(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct RequestDrivenWebService
		wantedError  error
	}{
		"should unmarshal basic yaml configuration": {
			inContent: []byte(
				"name: test-service\n" +
					"type: Request-Driven Web Service\n" +
					"cpu: 512\n" +
					"memory: 1024\n" +
					"image:\n" +
					"  build: ./Dockerfile\n" +
					"  port: 80\n",
			),
			wantedStruct: RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("test-service"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								// A scalar `build:` value unmarshals into BuildString.
								Build: BuildArgsOrString{
									BuildString: aws.String("./Dockerfile"),
								},
							},
						},
						Port: aws.Uint16(80),
					},
					InstanceConfig: AppRunnerInstanceConfig{
						CPU:    aws.Int(512),
						Memory: aws.Int(1024),
					},
				},
			},
		},
		"should unmarshal image location": {
			inContent: []byte(
				"image:\n" +
					"  location: test-repository/image@digest\n",
			),
			wantedStruct: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("test-repository/image@digest"),
							},
						},
					},
				},
			},
		},
		"should unmarshal image build configuration": {
			inContent: []byte(
				"image:\n" +
					"  build:\n" +
					"    dockerfile: ./Dockerfile\n" +
					"    context: context/dir\n" +
					"    target: build-stage\n" +
					"    cache_from:\n" +
					"      - image:tag\n" +
					"    args:\n" +
					"      a: 1\n" +
					"      b: 2\n",
			),
			wantedStruct: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								// A mapping `build:` value unmarshals into BuildArgs.
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Context:    aws.String("context/dir"),
										Dockerfile: aws.String("./Dockerfile"),
										Target:     aws.String("build-stage"),
										CacheFrom:  []string{"image:tag"},
										Args:       map[string]string{"a": "1", "b": "2"},
									},
								},
							},
						},
					},
				},
			},
		},
		"should unmarshal environment variables": {
			inContent: []byte(
				"variables:\n" +
					"  LOG_LEVEL: info\n" +
					"  NODE_ENV: development\n",
			),
			wantedStruct: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					Variables: map[string]Variable{
						"LOG_LEVEL": {
							stringOrFromCFN{
								Plain: stringP("info"),
							},
						},
						"NODE_ENV": {
							stringOrFromCFN{
								Plain: stringP("development"),
							},
						},
					},
				},
			},
		},
		"should unmarshal tags": {
			inContent: []byte(
				"tags:\n" +
					"  owner: account-id\n" +
					"  project: my-project\n",
			),
			wantedStruct: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					Tags: map[string]string{
						"owner":   "account-id",
						"project": "my-project",
					},
				},
			},
		},
		"should unmarshal http configuration": {
			inContent: []byte(
				"http:\n" +
					"  healthcheck:\n" +
					"    path: /healthcheck\n" +
					"    healthy_threshold: 3\n" +
					"    unhealthy_threshold: 5\n" +
					"    interval: 10s\n" +
					"    timeout: 5s\n" +
					"  alias: convex.domain.com",
			),
			wantedStruct: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					RequestDrivenWebServiceHttpConfig: RequestDrivenWebServiceHttpConfig{
						// A mapping healthcheck unmarshals into the advanced form.
						HealthCheckConfiguration: HealthCheckArgsOrString{
							Union: AdvancedToUnion[string](HTTPHealthCheckArgs{
								Path:               aws.String("/healthcheck"),
								HealthyThreshold:   aws.Int64(3),
								UnhealthyThreshold: aws.Int64(5),
								Interval:           durationp(10 * time.Second),
								Timeout:            durationp(5 * time.Second),
							}),
						},
						Alias: aws.String("convex.domain.com"),
					},
				},
			},
		},
		"should unmarshal healthcheck shorthand": {
			inContent: []byte(
				"http:\n" +
					"  healthcheck: /healthcheck\n",
			),
			wantedStruct: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					RequestDrivenWebServiceHttpConfig: RequestDrivenWebServiceHttpConfig{
						// A scalar healthcheck unmarshals into the basic (path-only) form.
						HealthCheckConfiguration: HealthCheckArgsOrString{
							Union: BasicToUnion[string, HTTPHealthCheckArgs]("/healthcheck"),
						},
					},
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var svc RequestDrivenWebService
			err := yaml.Unmarshal(tc.inContent, &svc)

			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedStruct, svc)
			}
		})
	}
}
// TestRequestDrivenWebService_MarshalBinary checks that MarshalBinary renders
// the manifest via the template parser, returning the rendered bytes on
// success and propagating the parser's error on failure.
func TestRequestDrivenWebService_MarshalBinary(t *testing.T) {
	testCases := map[string]struct {
		inManifest   *RequestDrivenWebService
		wantedBinary []byte
		wantedError  error
	}{
		"error parsing template": {
			inManifest:  &RequestDrivenWebService{},
			wantedError: errors.New("test error"),
		},
		"returns rendered content": {
			inManifest:   &RequestDrivenWebService{},
			wantedBinary: []byte("test content"),
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			mockParser := mocks.NewMockParser(ctrl)
			tc.inManifest.parser = mockParser
			// Only stub rendered content when the case expects bytes back;
			// otherwise the parser returns a nil *template.Content with the error.
			var wantedTemplContent *template.Content = nil
			if tc.wantedBinary != nil {
				wantedTemplContent = &template.Content{Buffer: bytes.NewBufferString(string(tc.wantedBinary))}
			}
			mockParser.
				EXPECT().
				Parse(requestDrivenWebSvcManifestPath, *tc.inManifest, gomock.Any()).
				Return(wantedTemplContent, tc.wantedError)

			b, err := tc.inManifest.MarshalBinary()

			require.Equal(t, tc.wantedError, err)
			require.Equal(t, tc.wantedBinary, b)
		})
	}
}
// TestRequestDrivenWebService_Port checks that Port surfaces the container
// port configured on the image and reports it as exposed.
func TestRequestDrivenWebService_Port(t *testing.T) {
	// GIVEN
	manifest := RequestDrivenWebService{
		RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
			ImageConfig: ImageWithPort{
				Port: uint16P(80),
			},
		},
	}

	// WHEN
	port, exposed := manifest.Port()

	// THEN
	require.True(t, exposed)
	require.Equal(t, uint16(80), port)
}
// TestRequestDrivenWebService_ContainerPlatform checks the platform string
// derived from explicit platform args, and the default when none are set.
func TestRequestDrivenWebService_ContainerPlatform(t *testing.T) {
	t.Run("should return platform string with values found in args", func(t *testing.T) {
		// GIVEN
		manifest := RequestDrivenWebService{
			RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
				InstanceConfig: AppRunnerInstanceConfig{
					Platform: PlatformArgsOrString{
						PlatformArgs: PlatformArgs{
							OSFamily: aws.String("ososos"),
							Arch:     aws.String("arch"),
						},
					},
				},
			},
		}

		// WHEN / THEN
		require.Equal(t, "ososos/arch", manifest.ContainerPlatform())
	})

	t.Run("should return default platform if platform field empty", func(t *testing.T) {
		// GIVEN
		manifest := RequestDrivenWebService{
			RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
				InstanceConfig: AppRunnerInstanceConfig{
					Platform: PlatformArgsOrString{
						PlatformString: nil,
					},
				},
			},
		}

		// WHEN / THEN
		require.Equal(t, "linux/amd64", manifest.ContainerPlatform())
	})
}
// TestRequestDrivenWebService_Publish checks that Publish returns nil when no
// topics are configured and the configured topic list otherwise.
func TestRequestDrivenWebService_Publish(t *testing.T) {
	testCases := map[string]struct {
		mft          *RequestDrivenWebService
		wantedTopics []Topic
	}{
		"returns nil if there are no topics set": {
			mft: &RequestDrivenWebService{},
		},
		"returns the list of topics if manifest publishes notifications": {
			mft: &RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{Name: stringP("hello")},
						},
					},
				},
			},
			wantedTopics: []Topic{
				{Name: stringP("hello")},
			},
		},
	}
	for name, tt := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wantedTopics, tt.mft.Publish())
		})
	}
}
// TestRequestDrivenWebService_ApplyEnv verifies the environment-override merge
// for the image field: an env-level `location` or `build` replaces the base
// configuration regardless of which form the base used, and the merged
// manifest has its Environments map cleared.
func TestRequestDrivenWebService_ApplyEnv(t *testing.T) {
	testCases := map[string]struct {
		in         *RequestDrivenWebService
		envToApply string
		wanted     *RequestDrivenWebService
	}{
		"with image build overridden by image location": {
			in: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./Dockerfile"),
									},
								},
							},
						},
					},
				},
				Environments: map[string]*RequestDrivenWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("env-override location"),
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("env-override location"),
							},
						},
					},
				},
			},
		},
		"with image location overridden by image location": {
			in: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("default location"),
							},
						},
					},
				},
				Environments: map[string]*RequestDrivenWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Location: aws.String("env-override location"),
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("env-override location"),
							},
						},
					},
				},
			},
		},
		"with image build overridden by image build": {
			in: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./Dockerfile"),
									},
								},
							},
						},
					},
				},
				Environments: map[string]*RequestDrivenWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("overridden build string"),
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildString: aws.String("overridden build string"),
								},
							},
						},
					},
				},
			},
		},
		"with image location overridden by image build": {
			in: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("default location"),
							},
						},
					},
				},
				Environments: map[string]*RequestDrivenWebServiceConfig{
					"prod-iad": {
						ImageConfig: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("overridden build string"),
									},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildString: aws.String("overridden build string"),
								},
							},
						},
					},
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			conf, _ := tc.in.applyEnv(tc.envToApply)

			// THEN
			require.Equal(t, tc.wanted, conf, "returned configuration should have overrides from the environment")
		})
	}
}
// TestRequestDrivenWebService_RequiredEnvironmentFeatures asserts that a plain
// RDWS requires no environment features, and that placing it in private
// subnets requires the NAT feature.
func TestRequestDrivenWebService_RequiredEnvironmentFeatures(t *testing.T) {
	testCases := map[string]struct {
		mft    func(svc *RequestDrivenWebService) // mutates the base manifest per case
		wanted []string
	}{
		"no feature required by default": {
			mft: func(svc *RequestDrivenWebService) {},
		},
		"nat feature required": {
			mft: func(svc *RequestDrivenWebService) {
				svc.Network = RequestDrivenWebServiceNetworkConfig{
					VPC: rdwsVpcConfig{
						Placement: PlacementArgOrString{
							PlacementString: placementStringP(PrivateSubnetPlacement),
						},
					},
				}
			},
			wanted: []string{template.NATFeatureName},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			inSvc := RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("mock-svc"),
					Type: aws.String(manifestinfo.RequestDrivenWebServiceType),
				},
			}
			tc.mft(&inSvc)

			got := inSvc.requiredEnvironmentFeatures()

			require.Equal(t, tc.wanted, got)
		})
	}
}
| 668 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
)
const (
	// staticSiteManifestPath is the embedded template path used to render a Static Site manifest.
	staticSiteManifestPath = "workloads/services/static-site/manifest.yml"
)
// StaticSite holds the configuration to configure and upload static assets to the static site service.
type StaticSite struct {
	Workload         `yaml:",inline"`
	StaticSiteConfig `yaml:",inline"`
	// Use *StaticSiteConfig because of https://github.com/imdario/mergo/issues/146
	Environments map[string]*StaticSiteConfig `yaml:",flow"` // Fields to override per environment.

	parser template.Parser // Renders the manifest template; not serialized.
}
// StaticSiteConfig holds the configuration for a static site service.
type StaticSiteConfig struct {
	HTTP StaticSiteHTTP `yaml:"http"`
	// FileUploads lists the local files/directories to upload as site assets.
	FileUploads []FileUpload `yaml:"files"`
}
// StaticSiteHTTP defines the http configuration for the static site.
type StaticSiteHTTP struct {
	Alias string `yaml:"alias"` // Custom domain alias for the site.
}
// FileUpload represents the options for file uploading.
type FileUpload struct {
	Source      string              `yaml:"source"`      // Local path to upload from.
	Destination string              `yaml:"destination"` // Destination path for the uploaded assets.
	Recursive   bool                `yaml:"recursive"`   // Whether to upload the source directory recursively.
	Exclude     StringSliceOrString `yaml:"exclude"`     // Glob pattern(s) of files to exclude.
	Reinclude   StringSliceOrString `yaml:"reinclude"`   // Glob pattern(s) to re-include after exclusion.
}
// StaticSiteProps represents the configuration needed to create a static site service.
type StaticSiteProps struct {
	Name string // Workload name.
	StaticSiteConfig
}
// NewStaticSite creates a new static site service manifest with props applied
// on top of the defaults.
func NewStaticSite(props StaticSiteProps) *StaticSite {
	svc := newDefaultStaticSite()
	// Apply overrides.
	svc.Name = stringP(props.Name)
	svc.FileUploads = props.StaticSiteConfig.FileUploads
	// Fix: props.HTTP was previously dropped; carry it over so a caller-supplied
	// alias is reflected in the generated manifest. The zero value leaves
	// behavior unchanged for existing callers.
	svc.HTTP = props.StaticSiteConfig.HTTP
	svc.parser = template.New()
	return svc
}
// newDefaultStaticSite returns a static site manifest skeleton with only the
// workload type pre-populated.
func newDefaultStaticSite() *StaticSite {
	site := new(StaticSite)
	site.Workload = Workload{
		Type: aws.String(manifestinfo.StaticSiteType),
	}
	return site
}
// applyEnv merges the per-environment override for envName (if any) on top of
// the base configuration and returns the resulting manifest. The value
// receiver keeps the original manifest untouched.
func (s StaticSite) applyEnv(envName string) (workloadManifest, error) {
	override, ok := s.Environments[envName]
	if !ok || override == nil {
		// No override for this environment; return the manifest as-is.
		return &s, nil
	}
	// Merge the environment-specific fields on top of the base config.
	for _, transformer := range defaultTransformers {
		layered := StaticSite{StaticSiteConfig: *override}
		if err := mergo.Merge(&s, layered, mergo.WithOverride, mergo.WithTransformers(transformer)); err != nil {
			return nil, err
		}
	}
	s.Environments = nil
	return &s, nil
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (s *StaticSite) MarshalBinary() ([]byte, error) {
	doc, parseErr := s.parser.Parse(staticSiteManifestPath, *s)
	if parseErr != nil {
		return nil, parseErr
	}
	return doc.Bytes(), nil
}
// subnets implements workloadManifest; a static site has no subnet placement.
func (s *StaticSite) subnets() *SubnetListOrArgs {
	return nil
}
// requiredEnvironmentFeatures implements workloadManifest; a static site
// requires no environment features.
func (s *StaticSite) requiredEnvironmentFeatures() []string {
	return nil
}
| 113 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/stretchr/testify/require"
)
// TestStaticSite_ApplyEnv verifies that applyEnv returns the manifest
// unchanged when no override exists for the environment, and that an
// environment-level FileUploads override replaces the base configuration.
// (Fix: removed a useless empty `var ()` declaration.)
func TestStaticSite_ApplyEnv(t *testing.T) {
	testCases := map[string]struct {
		in         *StaticSite
		envToApply string
		wanted     *StaticSite
	}{
		"without existing environments": {
			in: &StaticSite{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.StaticSiteType),
				},
				StaticSiteConfig: StaticSiteConfig{
					FileUploads: []FileUpload{
						{
							Source:      "test",
							Destination: "test",
							Reinclude: StringSliceOrString{
								StringSlice: []string{"test/manifest.yml"},
							},
							Exclude: StringSliceOrString{
								String: aws.String("test/*.yml"),
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &StaticSite{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.StaticSiteType),
				},
				StaticSiteConfig: StaticSiteConfig{
					FileUploads: []FileUpload{
						{
							Source:      "test",
							Destination: "test",
							Reinclude: StringSliceOrString{
								StringSlice: []string{"test/manifest.yml"},
							},
							Exclude: StringSliceOrString{
								String: aws.String("test/*.yml"),
							},
						},
					},
				},
			},
		},
		"with overrides": {
			in: &StaticSite{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.StaticSiteType),
				},
				StaticSiteConfig: StaticSiteConfig{
					FileUploads: []FileUpload{
						{
							Exclude: StringSliceOrString{
								String: aws.String("test/*.yml"),
							},
						},
					},
				},
				Environments: map[string]*StaticSiteConfig{
					"prod-iad": {
						FileUploads: []FileUpload{
							{
								Reinclude: StringSliceOrString{
									StringSlice: []string{"test/manifest.yml"},
								},
							},
						},
					},
				},
			},
			envToApply: "prod-iad",
			wanted: &StaticSite{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.StaticSiteType),
				},
				StaticSiteConfig: StaticSiteConfig{
					FileUploads: []FileUpload{
						{
							Reinclude: StringSliceOrString{
								StringSlice: []string{"test/manifest.yml"},
							},
						},
					},
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			conf, _ := tc.in.applyEnv(tc.envToApply)

			// THEN
			require.Equal(t, tc.wanted, conf, "returned configuration should have overrides from the environment")
		})
	}
}
| 123 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/template"
"gopkg.in/yaml.v3"
)
var (
	// errUnmarshalEFSOpts is returned when the `efs` YAML node is neither a
	// boolean nor a mapping of EFSVolumeConfiguration fields.
	errUnmarshalEFSOpts = errors.New(`cannot unmarshal "efs" field into bool or map`)
)
// Storage represents the options for external and native storage.
type Storage struct {
	Ephemeral      *int               `yaml:"ephemeral"`   // Ephemeral storage size for the task.
	ReadonlyRootFS *bool              `yaml:"readonly_fs"` // Whether the container root filesystem is read-only.
	Volumes        map[string]*Volume `yaml:"volumes"`     // NOTE: keep the pointers because `mergo` doesn't automatically deep merge map's value unless it's a pointer type.
}
// IsEmpty returns empty if the struct has all zero members.
func (s *Storage) IsEmpty() bool {
	switch {
	case s.Ephemeral != nil:
		return false
	case s.ReadonlyRootFS != nil:
		return false
	case s.Volumes != nil:
		return false
	default:
		return true
	}
}
// requiredEnvFeatures returns the environment features this storage
// configuration depends on (the EFS feature when managed EFS is in use).
func (s *Storage) requiredEnvFeatures() []string {
	if !s.hasManagedFS() {
		return nil
	}
	return []string{template.EFSFeatureName}
}
// hasManagedFS reports whether any configured volume uses Copilot-managed EFS.
func (s *Storage) hasManagedFS() bool {
	for _, vol := range s.Volumes {
		if !vol.EmptyVolume() && vol.EFS.UseManagedFS() {
			return true
		}
	}
	return false
}
// Volume is an abstraction which merges the MountPoint and Volumes concepts from the ECS Task Definition
type Volume struct {
	EFS            EFSConfigOrBool `yaml:"efs"` // EFS backing for the volume; bool shorthand or full config.
	MountPointOpts `yaml:",inline"`
}
// EmptyVolume returns true if the EFS configuration is nil or explicitly/implicitly disabled.
func (v *Volume) EmptyVolume() bool {
	// An unset EFS config, or one explicitly disabled via `efs: false`,
	// means there is no volume to create. The Bool value wins over any
	// erroneously populated advanced fields.
	return v.EFS.IsEmpty() || v.EFS.Disabled()
}
// MountPointOpts is shared between Volumes for the main container and MountPoints for sidecars.
type MountPointOpts struct {
	ContainerPath *string `yaml:"path"`      // Path inside the container where the volume is mounted.
	ReadOnly      *bool   `yaml:"read_only"` // Whether the mount is read-only.
}
// SidecarMountPoint is used to let sidecars mount volumes defined in `storage`
type SidecarMountPoint struct {
	SourceVolume   *string `yaml:"source_volume"` // Name of a volume declared under `storage.volumes`.
	MountPointOpts `yaml:",inline"`
}
// EFSVolumeConfiguration holds options which tell ECS how to reach out to the EFS filesystem.
// The BYO fields (id/root_dir/auth) and managed-EFS fields (uid/gid) are
// mutually exclusive; see isValid.
type EFSVolumeConfiguration struct {
	FileSystemID  *string             `yaml:"id"`       // Required. Can be specified as "copilot" or "managed" magic keys.
	RootDirectory *string             `yaml:"root_dir"` // Default "/". For BYO EFS.
	AuthConfig    AuthorizationConfig `yaml:"auth"`     // Auth config for BYO EFS.
	UID           *uint32             `yaml:"uid"`      // UID for managed EFS.
	GID           *uint32             `yaml:"gid"`      // GID for managed EFS.
}
// IsEmpty returns empty if the struct has all zero members.
func (e *EFSVolumeConfiguration) IsEmpty() bool {
	// Empty means neither BYO fields (id/root_dir/auth) nor managed-EFS
	// fields (uid/gid) are set.
	return e.EmptyBYOConfig() && e.EmptyUIDConfig()
}
// EFSConfigOrBool contains custom unmarshaling logic for the `efs` field in the manifest.
type EFSConfigOrBool struct {
	Advanced EFSVolumeConfiguration // Set when `efs` is a mapping.
	Enabled  *bool                  // Set when `efs` is a boolean; mutually exclusive with Advanced.
}
// IsEmpty returns empty if the struct has all zero members.
func (e *EFSConfigOrBool) IsEmpty() bool {
	return e.Enabled == nil && e.Advanced.IsEmpty()
}
// UnmarshalYAML implements the yaml(v3) interface. It allows EFS to be specified as a
// string or a struct alternately.
func (e *EFSConfigOrBool) UnmarshalYAML(value *yaml.Node) error {
	// Try the struct form first. A *yaml.TypeError just means the node is not
	// a mapping, so fall through to the boolean form; any other error is fatal.
	if err := value.Decode(&e.Advanced); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if !e.Advanced.IsEmpty() {
		if err := e.Advanced.isValid(); err != nil {
			// NOTE: `e.Advanced` contains exclusive fields.
			// Validating that exclusive fields cannot be set simultaneously is necessary during `UnmarshalYAML`
			// because the `ApplyEnv` stage assumes that no exclusive fields are set together.
			// Not validating it during `UnmarshalYAML` would potentially cause an invalid manifest being deemed valid.
			return err
		}
		// Unmarshaled successfully to e.Config, unset e.ID, and return.
		e.Enabled = nil
		return nil
	}
	// The node was not a mapping; decode it as a boolean.
	if err := value.Decode(&e.Enabled); err != nil {
		return errUnmarshalEFSOpts
	}
	return nil
}
// UseManagedFS returns true if the user has specified EFS as a bool, or has only specified UID and GID.
func (e *EFSConfigOrBool) UseManagedFS() bool {
	// An explicit enabled/disabled value always wins.
	if enabled := e.Enabled; enabled != nil {
		return aws.BoolValue(enabled)
	}
	// Otherwise managed EFS is implied by the presence of UID/GID.
	return !e.Advanced.EmptyUIDConfig()
}
// Disabled returns true if Enabled is explicitly set to false.
// This function is useful for checking that the EFS config has been intentionally turned off
// and whether we should ignore any values of the struct which have been populated erroneously.
func (e *EFSConfigOrBool) Disabled() bool {
	return e.Enabled != nil && !aws.BoolValue(e.Enabled)
}
// EmptyBYOConfig returns true if the `id`, `root_directory`, and `auth` fields are all empty.
// This would mean that no custom EFS information has been specified.
func (e *EFSVolumeConfiguration) EmptyBYOConfig() bool {
	return e.FileSystemID == nil && e.RootDirectory == nil && e.AuthConfig.IsEmpty()
}
// EmptyUIDConfig returns true if the `uid` and `gid` fields are empty. These fields are mutually exclusive
// with BYO EFS. If they are nonempty, then we should use managed EFS instead.
func (e *EFSVolumeConfiguration) EmptyUIDConfig() bool {
	hasUID := e.UID != nil
	hasGID := e.GID != nil
	return !hasUID && !hasGID
}
// unsetBYOConfig clears all bring-your-own-filesystem fields so only the
// managed-EFS fields (uid/gid) remain.
func (e *EFSVolumeConfiguration) unsetBYOConfig() {
	e.FileSystemID = nil
	e.AuthConfig = AuthorizationConfig{}
	e.RootDirectory = nil
}
// unsetUIDConfig clears the managed-EFS fields so only the BYO fields remain.
func (e *EFSVolumeConfiguration) unsetUIDConfig() {
	e.UID = nil
	e.GID = nil
}
// isValid returns an error if both the mutually exclusive field groups
// (uid/gid for managed EFS vs. id/root_dir/auth for BYO EFS) are set.
func (e *EFSVolumeConfiguration) isValid() error {
	if e.EmptyBYOConfig() || e.EmptyUIDConfig() {
		return nil
	}
	return &errFieldMutualExclusive{
		firstField:  "uid/gid",
		secondField: "id/root_dir/auth",
	}
}
// AuthorizationConfig holds options relating to access points and IAM authorization.
type AuthorizationConfig struct {
	IAM           *bool   `yaml:"iam"`             // Default true
	AccessPointID *string `yaml:"access_point_id"` // Default ""
}
// IsEmpty returns empty if the struct has all zero members.
func (a *AuthorizationConfig) IsEmpty() bool {
	noIAM := a.IAM == nil
	noAccessPoint := a.AccessPointID == nil
	return noIAM && noAccessPoint
}
| 197 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// testVolume is a minimal harness type for exercising EFSConfigOrBool's
// custom YAML unmarshaling in isolation.
type testVolume struct {
	EFS EFSConfigOrBool `yaml:"efs"`
}
// TestEFSConfigOrBool_UnmarshalYAML exercises the bool-or-map unmarshaling of
// the `efs` field, including the mutual-exclusion error between uid/gid and
// id/root_dir/auth.
// NOTE(review): the YAML indentation inside the raw-string fixtures was
// reconstructed at 2 spaces per level — confirm against the upstream file.
func TestEFSConfigOrBool_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		manifest []byte
		want     testVolume
		wantErr  string
	}{
		"simple case": {
			manifest: []byte(`
efs:
  id: fs-12345`),
			want: testVolume{
				EFS: EFSConfigOrBool{
					Advanced: EFSVolumeConfiguration{
						FileSystemID: aws.String("fs-12345"),
					},
				},
			},
		},
		"with managed FS and custom UID": {
			manifest: []byte(`
efs:
  uid: 1000
  gid: 10000`),
			want: testVolume{
				EFS: EFSConfigOrBool{
					Advanced: EFSVolumeConfiguration{
						UID: aws.Uint32(1000),
						GID: aws.Uint32(10000),
					},
				},
			},
		},
		"with just managed": {
			manifest: []byte(`
efs: true`),
			want: testVolume{
				EFS: EFSConfigOrBool{
					Enabled: aws.Bool(true),
				},
			},
		},
		"with auth": {
			manifest: []byte(`
efs:
  id: fs-12345
  root_directory: "/"
  auth:
    iam: true
    access_point_id: fsap-1234`),
			want: testVolume{
				EFS: EFSConfigOrBool{
					Advanced: EFSVolumeConfiguration{
						FileSystemID:  aws.String("fs-12345"),
						RootDirectory: aws.String("/"),
						AuthConfig: AuthorizationConfig{
							IAM:           aws.Bool(true),
							AccessPointID: aws.String("fsap-1234"),
						},
					},
				},
			},
		},
		"invalid": {
			// uid/gid and id are mutually exclusive; unmarshal must fail.
			manifest: []byte(`
efs:
  uid: 1000
  gid: 10000
  id: 1`),
			wantErr: `must specify one, not both, of "uid/gid" and "id/root_dir/auth"`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			v := testVolume{
				EFS: EFSConfigOrBool{},
			}

			// WHEN
			err := yaml.Unmarshal(tc.manifest, &v)

			// THEN
			if tc.wantErr == "" {
				require.NoError(t, err)
				require.Equal(t, tc.want.EFS.Enabled, v.EFS.Enabled)
				require.Equal(t, tc.want.EFS.Advanced.FileSystemID, v.EFS.Advanced.FileSystemID)
				require.Equal(t, tc.want.EFS.Advanced.AuthConfig, v.EFS.Advanced.AuthConfig)
				require.Equal(t, tc.want.EFS.Advanced.UID, v.EFS.Advanced.UID)
				require.Equal(t, tc.want.EFS.Advanced.GID, v.EFS.Advanced.GID)
			} else {
				require.EqualError(t, err, tc.wantErr)
			}
		})
	}
}
// Test_EmptyVolume verifies Volume.EmptyVolume across bool/advanced/misconfigured
// EFS settings; an explicit `efs: false` wins over any populated advanced fields.
func Test_EmptyVolume(t *testing.T) {
	testCases := map[string]struct {
		in   EFSConfigOrBool
		want bool
	}{
		"with bool set": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(true),
			},
			want: false,
		},
		"with bool set to false": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(false),
			},
			want: true,
		},
		"with uid/gid set": {
			in: EFSConfigOrBool{
				Advanced: EFSVolumeConfiguration{
					UID: aws.Uint32(1000),
					GID: aws.Uint32(10000),
				},
			},
			want: false,
		},
		"empty": {
			in:   EFSConfigOrBool{},
			want: true,
		},
		"misconfigured with boolean enabled": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(true),
				Advanced: EFSVolumeConfiguration{
					FileSystemID: aws.String("fs-1234"),
				},
			},
			want: false,
		},
		"misconfigured with FSID and UID": {
			in: EFSConfigOrBool{
				Advanced: EFSVolumeConfiguration{
					FileSystemID: aws.String("fs-12345"),
					UID:          aws.Uint32(6777),
					GID:          aws.Uint32(6777),
				},
			},
			want: false,
		},
		"misconfigured with bool set to false and extra config (should respect bool)": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(false),
				Advanced: EFSVolumeConfiguration{
					UID: aws.Uint32(6777),
					GID: aws.Uint32(6777),
				},
			},
			want: true,
		},
	}
	for name, tc := range testCases {
		v := Volume{
			EFS: tc.in,
		}
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.want, v.EmptyVolume())
		})
	}
}
// Test_UseManagedFS verifies that an explicit boolean wins and that uid/gid
// alone implies managed EFS.
func Test_UseManagedFS(t *testing.T) {
	testCases := map[string]struct {
		in   EFSConfigOrBool
		want bool
	}{
		"with bool set": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(true),
			},
			want: true,
		},
		"with bool set to false": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(false),
			},
			want: false,
		},
		"with uid/gid set": {
			in: EFSConfigOrBool{
				Advanced: EFSVolumeConfiguration{
					UID: aws.Uint32(1000),
					GID: aws.Uint32(10000),
				},
			},
			want: true,
		},
		"empty": {
			in:   EFSConfigOrBool{},
			want: false,
		},
		"misconfigured with boolean enabled": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(true),
				Advanced: EFSVolumeConfiguration{
					FileSystemID: aws.String("fs-1234"),
				},
			},
			want: true,
		},
		"misconfigured with FSID and UID": {
			in: EFSConfigOrBool{
				Advanced: EFSVolumeConfiguration{
					FileSystemID: aws.String("fs-12345"),
					UID:          aws.Uint32(6777),
					GID:          aws.Uint32(6777),
				},
			},
			want: true,
		},
		"misconfigured with bool set to false and extra config (should respect bool)": {
			in: EFSConfigOrBool{
				Enabled: aws.Bool(false),
				Advanced: EFSVolumeConfiguration{
					UID: aws.Uint32(6777),
					GID: aws.Uint32(6777),
				},
			},
			want: false,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.want, tc.in.UseManagedFS())
		})
	}
}
// TestStorage_IsEmpty verifies that any set field (ephemeral, readonly_fs, or
// volumes) makes the Storage config non-empty. Cases without a `wanted` field
// rely on the bool zero value (false).
func TestStorage_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		in     Storage
		wanted bool
	}{
		"empty storage": {
			in:     Storage{},
			wanted: true,
		},
		"non empty storage with ReadOnlyFS": {
			in: Storage{
				ReadonlyRootFS: aws.Bool(true),
			},
		},
		"non empty storage": {
			in: Storage{
				Volumes: map[string]*Volume{
					"volume1": nil,
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.in.IsEmpty()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestAuthorizationConfig_IsEmpty verifies IsEmpty for the EFS auth config;
// the non-empty case relies on the bool zero value (false) for `wanted`.
func TestAuthorizationConfig_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		in     AuthorizationConfig
		wanted bool
	}{
		"empty auth": {
			in:     AuthorizationConfig{},
			wanted: true,
		},
		"non empty auth": {
			in: AuthorizationConfig{
				IAM: aws.Bool(false),
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.in.IsEmpty()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}
| 311 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"math"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/aws/aws-sdk-go/aws"
"gopkg.in/yaml.v3"
)
// Range contains either a Range or a range configuration for Autoscaling ranges.
type Range struct {
	Value       *IntRangeBand // Mutually exclusive with RangeConfig
	RangeConfig RangeConfig   // Structured min/max/spot_from form.
}
// ExposedPortsIndex holds exposed ports configuration.
type ExposedPortsIndex struct {
	WorkloadName      string                   // holds name of the main container
	PortsForContainer map[string][]ExposedPort // holds exposed ports list for all the containers
	ContainerForPort  map[uint16]string        // holds port to container mapping
}
// mainContainerPort returns the explicitly defined port of the main container,
// or "" when the main container has no explicitly defined port.
func (idx ExposedPortsIndex) mainContainerPort() string {
	return idx.containerPortDefinedBy(idx.WorkloadName)
}
// containerPortDefinedBy returns the explicitly defined container port, if there is no port exposed for the container then returns the empty string "".
func (idx ExposedPortsIndex) containerPortDefinedBy(container string) string {
	ports := idx.PortsForContainer[container]
	for i := range ports {
		if !ports[i].isDefinedByContainer {
			continue
		}
		return strconv.Itoa(int(ports[i].Port))
	}
	return ""
}
// IsEmpty returns whether Range is empty.
func (r *Range) IsEmpty() bool {
	return r.RangeConfig.IsEmpty() && r.Value == nil
}
// Parse extracts the min and max from RangeOpts.
func (r *Range) Parse() (min int, max int, err error) {
	// The string band form takes precedence when set.
	if band := r.Value; band != nil {
		return band.Parse()
	}
	min = aws.IntValue(r.RangeConfig.Min)
	max = aws.IntValue(r.RangeConfig.Max)
	return min, max, nil
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the RangeOpts
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (r *Range) UnmarshalYAML(value *yaml.Node) error {
	// Try the structured form first; a *yaml.TypeError just means the node is
	// not a mapping, so fall through to the scalar band form.
	if err := value.Decode(&r.RangeConfig); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if !r.RangeConfig.IsEmpty() {
		// Unmarshaled successfully to r.RangeConfig, unset r.Range, and return.
		r.Value = nil
		return nil
	}
	if err := value.Decode(&r.Value); err != nil {
		return errUnmarshalRangeOpts
	}
	return nil
}
// IntRangeBand is a number range with maximum and minimum values.
type IntRangeBand string

// Parse parses Range string and returns the min and max values.
// For example: 1-100 returns 1 and 100.
func (r IntRangeBand) Parse() (min int, max int, err error) {
	parts := strings.Split(string(r), "-")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("invalid range value %s. Should be in format of ${min}-${max}", string(r))
	}
	if min, err = strconv.Atoi(parts[0]); err != nil {
		return 0, 0, fmt.Errorf("cannot convert minimum value %s to integer", parts[0])
	}
	if max, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, fmt.Errorf("cannot convert maximum value %s to integer", parts[1])
	}
	return min, max, nil
}
// RangeConfig contains a Min/Max count and an optional SpotFrom field, which
// sets the task count at which Copilot starts placing tasks on Fargate Spot.
// For example, with a 1-10 range and spot_from: 5, up to 4 tasks run on
// dedicated Fargate capacity, and any scaling event beyond that places
// additional tasks on spot capacity.
type RangeConfig struct {
	Min      *int `yaml:"min"`
	Max      *int `yaml:"max"`
	SpotFrom *int `yaml:"spot_from"`
}

// IsEmpty returns whether RangeConfig is empty.
func (r *RangeConfig) IsEmpty() bool {
	for _, field := range []*int{r.Min, r.Max, r.SpotFrom} {
		if field != nil {
			return false
		}
	}
	return true
}
// Count is a custom type which supports unmarshaling yaml which
// can either be of type int or type AdvancedCount.
type Count struct {
	Value         *int          // 0 is a valid value, so we want the default value to be nil.
	AdvancedCount AdvancedCount // Mutually exclusive with Value.
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the Count
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (c *Count) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first. A *yaml.TypeError just means the node is
	// not a map, so swallow it and try the plain-int form below.
	if err := value.Decode(&c.AdvancedCount); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if !c.AdvancedCount.IsEmpty() {
		// Successfully unmarshalled AdvancedCount fields, return
		return nil
	}
	// Fall back to a plain integer count.
	if err := value.Decode(&c.Value); err != nil {
		return errUnmarshalCountOpts
	}
	return nil
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the ScalingConfigOrT
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (r *ScalingConfigOrT[_]) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first. A *yaml.TypeError just means the node is
	// not a map, so swallow it and try the scalar form below.
	if err := value.Decode(&r.ScalingConfig); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if !r.ScalingConfig.IsEmpty() {
		// Successfully unmarshalled ScalingConfig fields, return
		return nil
	}
	// Fall back to the plain value form (e.g. `cpu_percentage: 70`).
	if err := value.Decode(&r.Value); err != nil {
		return errors.New(`unable to unmarshal into int or composite-style map`)
	}
	return nil
}
// IsEmpty returns whether Count is empty.
func (c *Count) IsEmpty() bool {
	if c.Value != nil {
		return false
	}
	return c.AdvancedCount.IsEmpty()
}
// Desired returns the desiredCount to be set on the CFN template
func (c *Count) Desired() (*int, error) {
	// Plain `count: n` form: use the value directly (may be nil).
	if c.AdvancedCount.IsEmpty() {
		return c.Value, nil
	}
	// A fixed spot count was requested; use it as the desired count.
	if c.AdvancedCount.IgnoreRange() {
		return c.AdvancedCount.Spot, nil
	}
	// With autoscaling, start the service at the bottom of the range.
	min, _, err := c.AdvancedCount.Range.Parse()
	if err != nil {
		return nil, fmt.Errorf("parse task count value %s: %w", aws.StringValue((*string)(c.AdvancedCount.Range.Value)), err)
	}
	return aws.Int(min), nil
}
// Percentage represents a valid percentage integer ranging from 0 to 100.
type Percentage int

// ScalingConfigOrT represents a resource that has autoscaling configurations or a generic value.
type ScalingConfigOrT[T ~int | time.Duration] struct {
	Value         *T                       // Plain scalar form, e.g. `cpu_percentage: 70`.
	ScalingConfig AdvancedScalingConfig[T] // mutually exclusive with Value
}
// AdvancedScalingConfig represents advanced configurable options for a scaling policy.
type AdvancedScalingConfig[T ~int | time.Duration] struct {
	Value    *T       `yaml:"value"`
	Cooldown Cooldown `yaml:"cooldown"`
}

// Cooldown represents the autoscaling cooldown of resources.
type Cooldown struct {
	ScaleInCooldown  *time.Duration `yaml:"in"`  // Wait time between scale-in events.
	ScaleOutCooldown *time.Duration `yaml:"out"` // Wait time between scale-out events.
}
// AdvancedCount represents the configurable options for Auto Scaling as well as
// Capacity configuration (spot).
type AdvancedCount struct {
	Spot         *int                            `yaml:"spot"` // mutually exclusive with other fields
	Range        Range                           `yaml:"range"`
	Cooldown     Cooldown                        `yaml:"cooldown"`
	CPU          ScalingConfigOrT[Percentage]    `yaml:"cpu_percentage"`
	Memory       ScalingConfigOrT[Percentage]    `yaml:"memory_percentage"`
	Requests     ScalingConfigOrT[int]           `yaml:"requests"`
	ResponseTime ScalingConfigOrT[time.Duration] `yaml:"response_time"`
	QueueScaling QueueScaling                    `yaml:"queue_delay"`

	// workloadType determines which scaling fields are valid; set by the owning manifest, not from YAML.
	workloadType string
}
// IsEmpty returns whether ScalingConfigOrT is empty
func (r *ScalingConfigOrT[_]) IsEmpty() bool {
	return r.Value == nil && r.ScalingConfig.IsEmpty()
}

// IsEmpty returns whether AdvancedScalingConfig is empty
func (a *AdvancedScalingConfig[_]) IsEmpty() bool {
	return a.Value == nil && a.Cooldown.IsEmpty()
}

// IsEmpty returns whether Cooldown is empty
func (c *Cooldown) IsEmpty() bool {
	return c.ScaleInCooldown == nil && c.ScaleOutCooldown == nil
}

// IsEmpty returns whether AdvancedCount is empty.
func (a *AdvancedCount) IsEmpty() bool {
	switch {
	case a.Spot != nil:
		return false
	case !a.Range.IsEmpty(), !a.Cooldown.IsEmpty():
		return false
	case !a.CPU.IsEmpty(), !a.Memory.IsEmpty():
		return false
	case !a.Requests.IsEmpty(), !a.ResponseTime.IsEmpty(), !a.QueueScaling.IsEmpty():
		return false
	default:
		return true
	}
}
// IgnoreRange returns whether desiredCount is specified on spot capacity
func (a *AdvancedCount) IgnoreRange() bool {
	return a.Spot != nil
}

// hasAutoscaling reports whether any autoscaling configuration is present:
// either a count range or one of the workload's scaling-metric fields.
func (a *AdvancedCount) hasAutoscaling() bool {
	if !a.Range.IsEmpty() {
		return true
	}
	return a.hasScalingFieldsSet()
}
// validScalingFields returns the autoscaling metric fields that are valid for
// this count's workload type, or nil for workload types without autoscaling support.
func (a *AdvancedCount) validScalingFields() []string {
	switch a.workloadType {
	// Load balanced and backend services share the same request-driven metrics,
	// so they are handled by a single case instead of two duplicated ones.
	case manifestinfo.LoadBalancedWebServiceType, manifestinfo.BackendServiceType:
		return []string{"cpu_percentage", "memory_percentage", "requests", "response_time"}
	case manifestinfo.WorkerServiceType:
		return []string{"cpu_percentage", "memory_percentage", "queue_delay"}
	default:
		return nil
	}
}
// hasScalingFieldsSet reports whether any scaling-metric field is set,
// considering only the fields meaningful for the workload type. Unknown
// workload types conservatively check every field.
func (a *AdvancedCount) hasScalingFieldsSet() bool {
	switch a.workloadType {
	// Load balanced and backend services share the same metric set; a single
	// case replaces the previously duplicated branches.
	case manifestinfo.LoadBalancedWebServiceType, manifestinfo.BackendServiceType:
		return !a.CPU.IsEmpty() || !a.Memory.IsEmpty() || !a.Requests.IsEmpty() || !a.ResponseTime.IsEmpty()
	case manifestinfo.WorkerServiceType:
		return !a.CPU.IsEmpty() || !a.Memory.IsEmpty() || !a.QueueScaling.IsEmpty()
	default:
		return !a.CPU.IsEmpty() || !a.Memory.IsEmpty() || !a.Requests.IsEmpty() || !a.ResponseTime.IsEmpty() || !a.QueueScaling.IsEmpty()
	}
}
// getInvalidFieldsSet returns the scaling fields that are set but not valid
// for this count's workload type.
func (a *AdvancedCount) getInvalidFieldsSet() []string {
	var invalidFields []string
	switch a.workloadType {
	// Load balanced and backend services reject the same field; a single case
	// replaces the previously duplicated branches.
	case manifestinfo.LoadBalancedWebServiceType, manifestinfo.BackendServiceType:
		if !a.QueueScaling.IsEmpty() {
			invalidFields = append(invalidFields, "queue_delay")
		}
	case manifestinfo.WorkerServiceType:
		if !a.Requests.IsEmpty() {
			invalidFields = append(invalidFields, "requests")
		}
		if !a.ResponseTime.IsEmpty() {
			invalidFields = append(invalidFields, "response_time")
		}
	}
	return invalidFields
}
// unsetAutoscaling clears every autoscaling-related field, keeping only the
// spot count and the workload type.
func (a *AdvancedCount) unsetAutoscaling() {
	*a = AdvancedCount{
		Spot:         a.Spot,
		workloadType: a.workloadType,
	}
}
// QueueScaling represents the configuration to scale a service based on a SQS queue.
type QueueScaling struct {
	AcceptableLatency *time.Duration `yaml:"acceptable_latency"`  // Longest acceptable time for a message to wait in the queue.
	AvgProcessingTime *time.Duration `yaml:"msg_processing_time"` // Average time to process a single message.
	Cooldown          Cooldown       `yaml:"cooldown"`
}

// IsEmpty returns true if no fields of the QueueScaling are set.
func (qs *QueueScaling) IsEmpty() bool {
	return qs.AcceptableLatency == nil && qs.AvgProcessingTime == nil && qs.Cooldown.IsEmpty()
}
// AcceptableBacklogPerTask returns the total number of messages that each task can accumulate in the queue
// while maintaining the AcceptableLatency given the AvgProcessingTime.
func (qs *QueueScaling) AcceptableBacklogPerTask() (int, error) {
	if qs.IsEmpty() {
		return 0, errors.New(`"queue_delay" must be specified in order to calculate the acceptable backlog`)
	}
	// Guard against partially specified config: IsEmpty is false when only one
	// of the two durations (or only the cooldown) is set, and dereferencing a
	// nil pointer below would panic.
	if qs.AcceptableLatency == nil || qs.AvgProcessingTime == nil {
		return 0, errors.New(`both "acceptable_latency" and "msg_processing_time" must be specified in order to calculate the acceptable backlog`)
	}
	// Guard against division by zero.
	if *qs.AvgProcessingTime == 0 {
		return 0, errors.New(`"msg_processing_time" cannot be 0`)
	}
	v := math.Ceil(float64(*qs.AcceptableLatency) / float64(*qs.AvgProcessingTime))
	return int(v), nil
}
// HTTPHealthCheckArgs holds the configuration to determine if the load balanced web service is healthy.
// These options are specifiable under the "healthcheck" field.
// See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html.
type HTTPHealthCheckArgs struct {
	Path               *string        `yaml:"path"`
	Port               *int           `yaml:"port"`
	SuccessCodes       *string        `yaml:"success_codes"`
	HealthyThreshold   *int64         `yaml:"healthy_threshold"`
	UnhealthyThreshold *int64         `yaml:"unhealthy_threshold"`
	Timeout            *time.Duration `yaml:"timeout"`
	Interval           *time.Duration `yaml:"interval"`
	GracePeriod        *time.Duration `yaml:"grace_period"`
}
// HealthCheckArgsOrString is a custom type which supports unmarshaling yaml which
// can either be of type string (the health check path) or type HTTPHealthCheckArgs.
type HealthCheckArgsOrString struct {
	Union[string, HTTPHealthCheckArgs]
}
// Path returns the health check path: the plain string form when the basic
// form was used, otherwise the path from the advanced configuration.
func (hc *HealthCheckArgsOrString) Path() *string {
	if !hc.IsBasic() {
		return hc.Advanced.Path
	}
	return aws.String(hc.Basic)
}
// NLBHealthCheckArgs holds the configuration to determine if the network load balanced web service is healthy.
// These options are specifiable under the "healthcheck" field.
// See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html.
type NLBHealthCheckArgs struct {
	Port               *int           `yaml:"port"`
	HealthyThreshold   *int64         `yaml:"healthy_threshold"`
	UnhealthyThreshold *int64         `yaml:"unhealthy_threshold"`
	Timeout            *time.Duration `yaml:"timeout"`
	Interval           *time.Duration `yaml:"interval"`
	GracePeriod        *time.Duration `yaml:"grace_period"`
}

// isEmpty returns true when none of the target-group health check fields are set.
// NOTE(review): GracePeriod is not included in this check, so a config with only
// grace_period set reads as empty. This may be intentional (grace period is an
// ECS service setting rather than a target-group one) — confirm with callers.
func (h *NLBHealthCheckArgs) isEmpty() bool {
	return h.Port == nil && h.HealthyThreshold == nil && h.UnhealthyThreshold == nil && h.Timeout == nil && h.Interval == nil
}
// ParsePortMapping parses a port-protocol string into its port and protocol parts.
// Valid examples: "2000/udp", or "2000". A nil input yields nil outputs.
func ParsePortMapping(s *string) (port *string, protocol *string, err error) {
	if s == nil {
		return nil, nil, nil
	}
	parts := strings.Split(*s, "/")
	if len(parts) == 1 {
		// Port only, no protocol.
		return aws.String(parts[0]), nil, nil
	}
	if len(parts) == 2 {
		return aws.String(parts[0]), aws.String(parts[1]), nil
	}
	return nil, nil, fmt.Errorf("cannot parse port mapping from %s", *s)
}
// fromCFN names a value imported from another CloudFormation stack's exports.
type fromCFN struct {
	Name *string `yaml:"from_cfn"`
}

// isEmpty returns true when no import name is set.
func (cfg *fromCFN) isEmpty() bool {
	return cfg.Name == nil
}

// stringOrFromCFN is a string provided either in plain form or imported from
// a CloudFormation stack export.
type stringOrFromCFN struct {
	Plain   *string
	FromCFN fromCFN
}

// isEmpty returns true when neither the plain form nor the import is set.
func (s stringOrFromCFN) isEmpty() bool {
	return s.Plain == nil && s.FromCFN.isEmpty()
}
// UnmarshalYAML implements the yaml.Unmarshaler (v3) interface to override the default YAML unmarshalling logic.
func (s *stringOrFromCFN) UnmarshalYAML(value *yaml.Node) error {
	// Try the `from_cfn` map form first. A *yaml.TypeError just means the node
	// is not a map, so swallow it and try the plain-string form below.
	if err := value.Decode(&s.FromCFN); err != nil {
		switch err.(type) {
		case *yaml.TypeError:
			break
		default:
			return err
		}
	}
	if !s.FromCFN.isEmpty() { // Successfully unmarshalled to a environment import name.
		return nil
	}
	if err := value.Decode(&s.Plain); err != nil { // Otherwise, try decoding the simple form.
		return errors.New(`cannot unmarshal field to a string or into a map`)
	}
	return nil
}
// exposedPorts returns the main container's exposed port as a one-element
// slice, or nil when the image does not expose a port.
func (cfg ImageWithPortAndHealthcheck) exposedPorts(workloadName string) []ExposedPort {
	if cfg.Port == nil {
		return nil
	}
	exposed := ExposedPort{
		Port:                 aws.Uint16Value(cfg.Port),
		Protocol:             "tcp",
		ContainerName:        workloadName,
		isDefinedByContainer: true,
	}
	return []ExposedPort{exposed}
}
// exposedPorts returns the main container's exposed port as a one-element
// slice, or nil when no port is configured.
func (cfg ImageWithHealthcheckAndOptionalPort) exposedPorts(workloadName string) []ExposedPort {
	if cfg.Port == nil {
		return nil
	}
	exposed := ExposedPort{
		Port:                 aws.Uint16Value(cfg.Port),
		Protocol:             "tcp",
		ContainerName:        workloadName,
		isDefinedByContainer: true,
	}
	return []ExposedPort{exposed}
}
// exposedPorts returns any new port that should be exposed given the
// application load balancer configuration and that is not already part of the
// given exposedPorts; nil when there is no target port or it is already exposed.
func (rr RoutingRule) exposedPorts(exposedPorts []ExposedPort, workloadName string) []ExposedPort {
	if rr.TargetPort == nil {
		return nil
	}
	wanted := aws.Uint16Value(rr.TargetPort)
	for _, exposed := range exposedPorts {
		if exposed.Port == wanted {
			// Already exposed; nothing new to add.
			return nil
		}
	}
	// Default to the main container unless a target container is specified.
	container := workloadName
	if rr.TargetContainer != nil {
		container = aws.StringValue(rr.TargetContainer)
	}
	return []ExposedPort{
		{
			Port:          wanted,
			Protocol:      "tcp",
			ContainerName: container,
		},
	}
}
// exposedPorts returns any new port that should be exposed given the network load balancer
// configuration that's not part of the existing containerPorts.
func (cfg NetworkLoadBalancerListener) exposedPorts(exposedPorts []ExposedPort, workloadName string) ([]ExposedPort, error) {
	if cfg.IsEmpty() {
		return nil, nil
	}
	// The listener port may carry a protocol suffix (e.g. "443/tls"); only the port number matters here.
	nlbPort, _, err := ParsePortMapping(cfg.Port)
	if err != nil {
		return nil, err
	}
	port, err := strconv.ParseUint(aws.StringValue(nlbPort), 10, 16)
	if err != nil {
		return nil, err
	}
	// By default the target port equals the listener port.
	targetPort := uint16(port)
	if cfg.TargetPort != nil {
		targetPort = uint16(aws.IntValue(cfg.TargetPort))
	}
	// Nothing new to expose when the target port is already exposed.
	for _, exposedPort := range exposedPorts {
		if targetPort == exposedPort.Port {
			return nil, nil
		}
	}
	// Default to the main container unless a target container is specified.
	targetContainer := workloadName
	if cfg.TargetContainer != nil {
		targetContainer = aws.StringValue(cfg.TargetContainer)
	}
	return []ExposedPort{
		{
			Port:          targetPort,
			Protocol:      "tcp",
			ContainerName: targetContainer,
		},
	}, nil
}
// exposedPorts returns the port exposed by the sidecar as a one-element slice,
// or nil when the sidecar does not expose a port.
func (sidecar SidecarConfig) exposedPorts(sidecarName string) ([]ExposedPort, error) {
	if sidecar.Port == nil {
		return nil, nil
	}
	portStr, protocolPtr, err := ParsePortMapping(sidecar.Port)
	if err != nil {
		return nil, err
	}
	// Default to TCP when no protocol is given in the port mapping.
	protocol := "tcp"
	if protocolPtr != nil {
		protocol = aws.StringValue(protocolPtr)
	}
	parsed, err := strconv.ParseUint(aws.StringValue(portStr), 10, 16)
	if err != nil {
		return nil, err
	}
	exposed := ExposedPort{
		Port:                 uint16(parsed),
		Protocol:             strings.ToLower(protocol),
		ContainerName:        sidecarName,
		isDefinedByContainer: true,
	}
	return []ExposedPort{exposed}, nil
}
// sortExposedPorts orders the ports by increasing port number so that rendered
// output is deterministic (keeps the integration tests from being flaky).
func sortExposedPorts(exposedPorts []ExposedPort) []ExposedPort {
	sort.Slice(exposedPorts, func(a, b int) bool {
		return exposedPorts[a].Port < exposedPorts[b].Port
	})
	return exposedPorts
}
// Target returns target container and target port for the ALB configuration.
// This method should be called only when ALB config is not empty.
func (rr *RoutingRule) Target(exposedPorts ExposedPortsIndex) (targetContainer string, targetPort string, err error) {
	// Route load balancer traffic to main container by default.
	targetContainer = exposedPorts.WorkloadName
	targetPort = exposedPorts.mainContainerPort()
	rrTargetContainer := rr.TargetContainer
	rrTargetPort := rr.TargetPort
	if rrTargetContainer == nil && rrTargetPort == nil { // both targetPort and targetContainer are nil.
		return
	}
	if rrTargetPort == nil { // when target_port is nil
		if aws.StringValue(rrTargetContainer) != exposedPorts.WorkloadName {
			targetContainer = aws.StringValue(rrTargetContainer)
			targetPort = exposedPorts.containerPortDefinedBy(aws.StringValue(rrTargetContainer))
			/* NOTE: When the `target_port` is empty, the intended target port should be the port that is explicitly exposed by the container. Consider the following example
			```
			http:
			  target_container: nginx
			sidecars:
			  nginx:
			    port: 81 # Explicitly exposed by the nginx container.
			```
			In this example, the target port for the ALB listener rule should be 81
			*/
		}
		return
	}
	if rrTargetContainer == nil { // when target_container is nil
		container, port := targetContainerFromTargetPort(exposedPorts, rrTargetPort)
		targetPort = aws.StringValue(port)
		// In general, containers aren't expected to be empty. But this condition is applied for extra safety.
		if container != nil {
			targetContainer = aws.StringValue(container)
		}
		return
	}
	// when both target_port and target_container are not nil
	targetContainer = aws.StringValue(rrTargetContainer)
	targetPort = template.StrconvUint16(aws.Uint16Value(rrTargetPort))
	return
}
// targetContainerFromTargetPort returns the container that exposes the given
// target_port along with the stringified port. The returned container is nil
// when no container is registered for the port.
func targetContainerFromTargetPort(exposedPorts ExposedPortsIndex, port *uint16) (targetContainer *string, targetPort *string) {
	// Route load balancer traffic to the target_port if mentioned.
	targetPort = aws.String(template.StrconvUint16(aws.Uint16Value(port)))
	// It shouldn't be possible that the container is empty for the given port, as the
	// exposed-ports index assigns a container to every port; the check is for extra safety.
	// A single comma-ok lookup replaces the previous double map access.
	if container, ok := exposedPorts.ContainerForPort[aws.Uint16Value(port)]; ok && container != "" {
		targetContainer = aws.String(container)
	}
	return
}
// MainContainerPort returns the main container port.
func (s *LoadBalancedWebService) MainContainerPort() string {
	port := aws.Uint16Value(s.ImageConfig.Port)
	return strconv.FormatUint(uint64(port), 10)
}
// MainContainerPort returns the main container port if given, otherwise the
// sentinel value indicating no exposed container port.
func (s *BackendService) MainContainerPort() string {
	if s.BackendServiceConfig.ImageConfig.Port == nil {
		return template.NoExposedContainerPort
	}
	return strconv.FormatUint(uint64(aws.Uint16Value(s.BackendServiceConfig.ImageConfig.Port)), 10)
}
// prepareParsedExposedPortsMap indexes the given exposed ports both by
// container name and by port number.
func prepareParsedExposedPortsMap(exposedPorts []ExposedPort) (map[string][]ExposedPort, map[uint16]string) {
	byContainer := make(map[string][]ExposedPort)
	byPort := make(map[uint16]string)
	for _, p := range exposedPorts {
		byContainer[p.ContainerName] = append(byContainer[p.ContainerName], p)
		byPort[p.Port] = p.ContainerName
	}
	return byContainer, byPort
}
// Target returns target container and target port for a NLB listener configuration.
func (listener NetworkLoadBalancerListener) Target(exposedPorts ExposedPortsIndex) (targetContainer string, targetPort string, err error) {
	// Parse listener port and protocol.
	port, _, err := ParsePortMapping(listener.Port)
	if err != nil {
		return "", "", err
	}
	// By default, the target port is the same as listener port.
	targetPort = aws.StringValue(port)
	targetContainer = exposedPorts.WorkloadName
	if listener.TargetContainer == nil && listener.TargetPort == nil { // both targetPort and targetContainer are nil.
		return
	}
	if listener.TargetPort == nil { // when target_port is nil
		if aws.StringValue(listener.TargetContainer) != exposedPorts.WorkloadName {
			targetContainer = aws.StringValue(listener.TargetContainer)
			// NOTE(review): this loop does not break on the first match, so if a
			// container explicitly exposed several ports the last one would win —
			// presumably at most one explicitly-defined port is expected; confirm.
			for _, portConfig := range exposedPorts.PortsForContainer[targetContainer] {
				if portConfig.isDefinedByContainer {
					targetPort = strconv.Itoa(int(portConfig.Port))
					/* NOTE: When the `target_port` is empty, the intended target port should be the port that is explicitly exposed by the container. Consider the following example
					```
					http:
					  target_container: nginx
					  target_port: 83 # Implicitly exposed by the nginx container
					nlb:
					  port: 80/tcp
					  target_container: nginx
					sidecars:
					  nginx:
					    port: 81 # Explicitly exposed by the nginx container.
					```
					In this example, the target port for the NLB listener should be 81
					*/
				}
			}
		}
		return
	}
	if listener.TargetContainer == nil { // when target_container is nil
		container, port := targetContainerFromTargetPort(exposedPorts, uint16P(uint16(aws.IntValue(listener.TargetPort))))
		targetPort = aws.StringValue(port)
		// In general, containers aren't expected to be empty. But this condition is applied for extra safety.
		if container != nil {
			targetContainer = aws.StringValue(container)
		}
		return
	}
	// when both target_port and target_container are not nil
	targetContainer = aws.StringValue(listener.TargetContainer)
	targetPort = template.StrconvUint16(uint16(aws.IntValue(listener.TargetPort)))
	return
}
| 705 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestUnmarshalSvc verifies that service manifests unmarshal into the expected
// typed structs for each workload type, and that unknown types error out.
func TestUnmarshalSvc(t *testing.T) {
	perc := Percentage(70)
	mockConfig := ScalingConfigOrT[Percentage]{
		Value: &perc,
	}
	testCases := map[string]struct {
		inContent            string
		requireCorrectValues func(t *testing.T, i interface{})
		wantedErr            error
	}{
		"load balanced web service": {
			inContent: `
version: 1.0
name: frontend
type: "Load Balanced Web Service"
taskdef_overrides:
  - path: "ContainerDefinitions[0].Ulimits[-].HardLimit"
    value: !Ref ParamName
image:
  location: foo/bar
  credentials: some arn
  port: 80
cpu: 512
memory: 1024
count: 1
exec: true
network:
  connect: true
http:
  path: "svc"
  target_container: "frontend"
  alias:
    - foobar.com
    - v1.foobar.com
  allowed_source_ips:
    - 10.1.0.0/24
    - 10.1.1.0/24
variables:
  LOG_LEVEL: "WARN"
secrets:
  DB_PASSWORD: MYSQL_DB_PASSWORD
sidecars:
  xray:
    port: 2000/udp
    image: 123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon
    credentialsParameter: some arn
  nginx:
    image:
      build:
        dockerfile: "web/Dockerfile"
        context: "pathto/Dockerfile"
        target: "build-stage"
        cache_from:
          - foo/bar:latest
        args:
          arg1: value1
logging:
  destination:
    Name: cloudwatch
    include-pattern: ^[a-z][aeiou].*$
    exclude-pattern: ^.*[aeiou]$
  enableMetadata: false
  secretOptions:
    LOG_TOKEN: LOG_TOKEN
  configFilePath: /extra.conf
environments:
  test:
    count: 3
  staging1:
    count:
      spot: 5
  staging2:
    count:
      range:
        min: 2
        max: 8
        spot_from: 4
  prod:
    count:
      range: 1-10
      cpu_percentage: 70
`,
			requireCorrectValues: func(t *testing.T, i interface{}) {
				mockRange := IntRangeBand("1-10")
				actualManifest, ok := i.(*LoadBalancedWebService)
				require.True(t, ok)
				wantedManifest := &LoadBalancedWebService{
					Workload: Workload{Name: aws.String("frontend"), Type: aws.String(manifestinfo.LoadBalancedWebServiceType)},
					LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
						ImageConfig: ImageWithPortAndHealthcheck{
							ImageWithPort: ImageWithPort{Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build:    BuildArgsOrString{},
									Location: aws.String("foo/bar"),
								},
								Credentials: aws.String("some arn"),
							}, Port: aws.Uint16(80)},
						},
						HTTPOrBool: HTTPOrBool{
							HTTP: HTTP{
								Main: RoutingRule{
									Alias: Alias{
										AdvancedAliases: []AdvancedAlias{},
										StringSliceOrString: StringSliceOrString{
											StringSlice: []string{
												"foobar.com",
												"v1.foobar.com",
											},
										},
									},
									Path:             aws.String("svc"),
									TargetContainer:  aws.String("frontend"),
									HealthCheck:      HealthCheckArgsOrString{},
									AllowedSourceIps: []IPNet{IPNet("10.1.0.0/24"), IPNet("10.1.1.0/24")},
								},
							},
						},
						TaskConfig: TaskConfig{
							CPU:    aws.Int(512),
							Memory: aws.Int(1024),
							Count: Count{
								Value: aws.Int(1),
								AdvancedCount: AdvancedCount{
									workloadType: manifestinfo.LoadBalancedWebServiceType,
								},
							},
							ExecuteCommand: ExecuteCommand{
								Enable: aws.Bool(true),
							},
							Variables: map[string]Variable{
								"LOG_LEVEL": {
									stringOrFromCFN{
										Plain: stringP("WARN"),
									},
								},
							},
							Secrets: map[string]Secret{
								"DB_PASSWORD": {
									from: stringOrFromCFN{
										Plain: aws.String("MYSQL_DB_PASSWORD"),
									},
								},
							},
						},
						Sidecars: map[string]*SidecarConfig{
							"xray": {
								Port:       aws.String("2000/udp"),
								Image:      BasicToUnion[*string, ImageLocationOrBuild](aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon")),
								CredsParam: aws.String("some arn"),
							},
							"nginx": {
								Image: AdvancedToUnion[*string](
									ImageLocationOrBuild{
										Build: BuildArgsOrString{
											BuildArgs: DockerBuildArgs{
												Dockerfile: aws.String("web/Dockerfile"),
												Context:    aws.String("pathto/Dockerfile"),
												Target:     aws.String("build-stage"),
												CacheFrom:  []string{"foo/bar:latest"},
												Args: map[string]string{
													"arg1": "value1",
												},
											},
										},
									},
								),
							},
						},
						Logging: Logging{
							Destination: map[string]string{
								"exclude-pattern": "^.*[aeiou]$",
								"include-pattern": "^[a-z][aeiou].*$",
								"Name":            "cloudwatch",
							},
							EnableMetadata: aws.Bool(false),
							ConfigFile:     aws.String("/extra.conf"),
							SecretOptions: map[string]Secret{
								"LOG_TOKEN": {
									from: stringOrFromCFN{
										Plain: aws.String("LOG_TOKEN"),
									},
								},
							},
						},
						Network: NetworkConfig{
							VPC: vpcConfig{
								Placement: PlacementArgOrString{
									PlacementString: placementStringP(PublicSubnetPlacement),
								},
							},
							Connect: ServiceConnectBoolOrArgs{
								EnableServiceConnect: aws.Bool(true),
							},
						},
						TaskDefOverrides: []OverrideRule{
							{
								Path: "ContainerDefinitions[0].Ulimits[-].HardLimit",
								// Expected position of the !Ref scalar within the YAML document above.
								Value: yaml.Node{
									Kind:   8,
									Style:  1,
									Tag:    "!Ref",
									Value:  "ParamName",
									Line:   7,
									Column: 12,
								},
							},
						},
					},
					Environments: map[string]*LoadBalancedWebServiceConfig{
						"test": {
							TaskConfig: TaskConfig{
								Count: Count{
									Value: aws.Int(3),
								},
							},
						},
						"staging1": {
							TaskConfig: TaskConfig{
								Count: Count{
									AdvancedCount: AdvancedCount{
										Spot: aws.Int(5),
									},
								},
							},
						},
						"staging2": {
							TaskConfig: TaskConfig{
								Count: Count{
									AdvancedCount: AdvancedCount{
										Range: Range{
											RangeConfig: RangeConfig{
												Min:      aws.Int(2),
												Max:      aws.Int(8),
												SpotFrom: aws.Int(4),
											},
										},
									},
								},
							},
						},
						"prod": {
							TaskConfig: TaskConfig{
								Count: Count{
									AdvancedCount: AdvancedCount{
										Range: Range{
											Value: &mockRange,
										},
										CPU: mockConfig,
									},
								},
							},
						},
					},
				}
				require.Equal(t, wantedManifest, actualManifest)
			},
		},
		"Backend Service": {
			inContent: `
name: subscribers
type: Backend Service
image:
  build: ./subscribers/Dockerfile
  port: 8080
  healthcheck:
    command: ['CMD-SHELL', 'curl http://localhost:5000/ || exit 1']
cpu: 1024
memory: 1024
secrets:
  API_TOKEN: SUBS_API_TOKEN`,
			requireCorrectValues: func(t *testing.T, i interface{}) {
				actualManifest, ok := i.(*BackendService)
				require.True(t, ok)
				wantedManifest := &BackendService{
					Workload: Workload{
						Name: aws.String("subscribers"),
						Type: aws.String(manifestinfo.BackendServiceType),
					},
					BackendServiceConfig: BackendServiceConfig{
						ImageConfig: ImageWithHealthcheckAndOptionalPort{
							ImageWithOptionalPort: ImageWithOptionalPort{
								Image: Image{
									ImageLocationOrBuild: ImageLocationOrBuild{
										Build: BuildArgsOrString{
											BuildString: aws.String("./subscribers/Dockerfile"),
										},
									},
								},
								Port: aws.Uint16(8080),
							},
							HealthCheck: ContainerHealthCheck{
								Command: []string{"CMD-SHELL", "curl http://localhost:5000/ || exit 1"},
							},
						},
						TaskConfig: TaskConfig{
							CPU:    aws.Int(1024),
							Memory: aws.Int(1024),
							Count: Count{
								Value: aws.Int(1),
								AdvancedCount: AdvancedCount{
									workloadType: manifestinfo.BackendServiceType,
								},
							},
							ExecuteCommand: ExecuteCommand{
								Enable: aws.Bool(false),
							},
							Secrets: map[string]Secret{
								"API_TOKEN": {
									from: stringOrFromCFN{
										Plain: aws.String("SUBS_API_TOKEN"),
									},
								},
							},
						},
						Network: NetworkConfig{
							VPC: vpcConfig{
								Placement: PlacementArgOrString{
									PlacementString: placementStringP(PublicSubnetPlacement),
								},
							},
						},
					},
					Environments: map[string]*BackendServiceConfig{},
				}
				require.Equal(t, wantedManifest, actualManifest)
			},
		},
		"Worker Service": {
			inContent: `
name: dogcategorizer
type: Worker Service
image:
  build: ./dogcategorizer/Dockerfile
cpu: 1024
memory: 1024
exec: true # Enable running commands in your container.
count: 1
subscribe:
  queue:
    delay: 15s
    dead_letter:
      tries: 5
  topics:
    - name: publisher1
      service: testpubsvc
    - name: publisher2
      service: testpubjob
      queue:
        timeout: 15s`,
			requireCorrectValues: func(t *testing.T, i interface{}) {
				actualManifest, ok := i.(*WorkerService)
				duration15Seconds := 15 * time.Second
				require.True(t, ok)
				wantedManifest := &WorkerService{
					Workload: Workload{
						Name: aws.String("dogcategorizer"),
						Type: aws.String(manifestinfo.WorkerServiceType),
					},
					WorkerServiceConfig: WorkerServiceConfig{
						ImageConfig: ImageWithHealthcheck{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build: BuildArgsOrString{
										BuildString: aws.String("./dogcategorizer/Dockerfile"),
									},
								},
							},
						},
						TaskConfig: TaskConfig{
							CPU:    aws.Int(1024),
							Memory: aws.Int(1024),
							Count: Count{
								Value: aws.Int(1),
								AdvancedCount: AdvancedCount{
									workloadType: manifestinfo.WorkerServiceType,
								},
							},
							ExecuteCommand: ExecuteCommand{
								Enable: aws.Bool(true),
							},
						},
						Network: NetworkConfig{
							VPC: vpcConfig{
								Placement: PlacementArgOrString{
									PlacementString: placementStringP(PublicSubnetPlacement),
								},
							},
						},
						Subscribe: SubscribeConfig{
							Topics: []TopicSubscription{
								{
									Name:    aws.String("publisher1"),
									Service: aws.String("testpubsvc"),
								},
								{
									Name:    aws.String("publisher2"),
									Service: aws.String("testpubjob"),
									Queue: SQSQueueOrBool{
										Advanced: SQSQueue{
											Timeout: &duration15Seconds,
										},
									},
								},
							},
							Queue: SQSQueue{
								Delay: &duration15Seconds,
								DeadLetter: DeadLetterQueue{
									Tries: aws.Uint16(5),
								},
							},
						},
					},
					Environments: map[string]*WorkerServiceConfig{},
				}
				require.Equal(t, wantedManifest, actualManifest)
			},
		},
		"invalid svc type": {
			inContent: `
name: CowSvc
type: 'OH NO'
`,
			wantedErr: &ErrInvalidWorkloadType{Type: "OH NO"},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			m, err := UnmarshalWorkload([]byte(tc.inContent))

			if tc.wantedErr != nil {
				require.EqualError(t, err, tc.wantedErr.Error())
			} else {
				require.NoError(t, err)
				tc.requireCorrectValues(t, m.Manifest())
			}
		})
	}
}
// TestStringOrFromCFN_UnmarshalYAML verifies that stringOrFromCFN decodes both
// the plain-string form and the from_cfn map form, and errors on anything else.
func TestStringOrFromCFN_UnmarshalYAML(t *testing.T) {
	type mockField struct {
		stringOrFromCFN
	}
	type mockParentField struct {
		MockField mockField `yaml:"mock_field"`
	}
	testCases := map[string]struct {
		in          []byte
		wanted      mockParentField
		wantedError error
	}{
		"unmarshal plain string": {
			in: []byte(`mock_field: hey`),
			wanted: mockParentField{
				MockField: mockField{
					stringOrFromCFN{
						Plain: aws.String("hey"),
					},
				},
			},
		},
		"unmarshal import name": {
			in: []byte(`mock_field:
  from_cfn: yo`),
			wanted: mockParentField{
				MockField: mockField{
					stringOrFromCFN{
						FromCFN: fromCFN{
							Name: aws.String("yo"),
						},
					},
				},
			},
		},
		"nothing to unmarshal": {
			in: []byte(`other_field: yo`),
		},
		"fail to unmarshal": {
			in: []byte(`mock_field:
  heyyo: !`),
			wantedError: errors.New(`cannot unmarshal field to a string or into a map`),
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var s mockParentField
			err := yaml.Unmarshal(tc.in, &s)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wanted, s)
			}
		})
	}
}
// TestCount_UnmarshalYAML verifies that the `count` field decodes the legacy
// integer form, the spot form, and the various autoscaling range forms.
func TestCount_UnmarshalYAML(t *testing.T) {
	var (
		perc         = Percentage(70)
		timeMinute   = 60 * time.Second
		reqNum       = 1000
		responseTime = 500 * time.Millisecond

		mockRange          = IntRangeBand("1-10")
		mockAdvancedConfig = ScalingConfigOrT[Percentage]{
			ScalingConfig: AdvancedScalingConfig[Percentage]{
				Value: &perc,
				Cooldown: Cooldown{
					ScaleInCooldown:  &timeMinute,
					ScaleOutCooldown: &timeMinute,
				},
			},
		}
		mockConfig = ScalingConfigOrT[Percentage]{
			Value: &perc,
		}
		mockCooldown = Cooldown{
			ScaleInCooldown:  &timeMinute,
			ScaleOutCooldown: &timeMinute,
		}
		mockRequests = ScalingConfigOrT[int]{
			Value: &reqNum,
		}
		mockResponseTime = ScalingConfigOrT[time.Duration]{
			Value: &responseTime,
		}
	)
	testCases := map[string]struct {
		inContent []byte

		wantedStruct Count
		wantedError  error
	}{
		"legacy case: simple task count": {
			inContent: []byte(`count: 1`),
			wantedStruct: Count{
				Value: aws.Int(1),
			},
		},
		"With auto scaling enabled": {
			inContent: []byte(`count:
  range: 1-10
  cpu_percentage:
    value: 70
    cooldown:
      in: 1m
      out: 1m
  memory_percentage: 70
  requests: 1000
  response_time: 500ms
`),
			wantedStruct: Count{
				AdvancedCount: AdvancedCount{
					Range:        Range{Value: &mockRange},
					CPU:          mockAdvancedConfig,
					Memory:       mockConfig,
					Requests:     mockRequests,
					ResponseTime: mockResponseTime,
				},
			},
		},
		"With spot specified as count": {
			inContent: []byte(`count:
  spot: 42
`),
			wantedStruct: Count{
				AdvancedCount: AdvancedCount{
					Spot: aws.Int(42),
				},
			},
		},
		"With range specified as min-max": {
			inContent: []byte(`count:
  range:
    min: 5
    max: 15
`),
			wantedStruct: Count{
				AdvancedCount: AdvancedCount{
					Range: Range{
						RangeConfig: RangeConfig{
							Min: aws.Int(5),
							Max: aws.Int(15),
						},
					},
				},
			},
		},
		"With all RangeConfig fields specified and autoscaling field": {
			inContent: []byte(`count:
  range:
    min: 2
    max: 8
    spot_from: 3
  cooldown:
    in: 1m
    out: 1m
  cpu_percentage: 70
`),
			wantedStruct: Count{
				AdvancedCount: AdvancedCount{
					Range: Range{
						RangeConfig: RangeConfig{
							Min:      aws.Int(2),
							Max:      aws.Int(8),
							SpotFrom: aws.Int(3),
						},
					},
					Cooldown: mockCooldown,
					CPU:      mockConfig,
				},
			},
		},
		"Error if unmarshalable": {
			inContent: []byte(`count: badNumber
`),
			wantedError: errUnmarshalCountOpts,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var b TaskConfig
			err := yaml.Unmarshal(tc.inContent, &b)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				// check memberwise dereferenced pointer equality
				require.Equal(t, tc.wantedStruct.Value, b.Count.Value)
				require.Equal(t, tc.wantedStruct.AdvancedCount.Range, b.Count.AdvancedCount.Range)
				require.Equal(t, tc.wantedStruct.AdvancedCount.Cooldown, b.Count.AdvancedCount.Cooldown)
				require.Equal(t, tc.wantedStruct.AdvancedCount.CPU, b.Count.AdvancedCount.CPU)
				require.Equal(t, tc.wantedStruct.AdvancedCount.Memory, b.Count.AdvancedCount.Memory)
				require.Equal(t, tc.wantedStruct.AdvancedCount.Requests, b.Count.AdvancedCount.Requests)
				require.Equal(t, tc.wantedStruct.AdvancedCount.ResponseTime, b.Count.AdvancedCount.ResponseTime)
				require.Equal(t, tc.wantedStruct.AdvancedCount.Spot, b.Count.AdvancedCount.Spot)
			}
		})
	}
}
// TestIntRangeBand_Parse validates parsing of "min-max" range strings,
// covering malformed input, non-numeric bounds, and the happy path.
func TestIntRangeBand_Parse(t *testing.T) {
	tests := map[string]struct {
		inRange   string
		wantedMin int
		wantedMax int
		wantedErr error
	}{
		"invalid format": {
			inRange:   "badRange",
			wantedErr: fmt.Errorf("invalid range value badRange. Should be in format of ${min}-${max}"),
		},
		"invalid minimum": {
			inRange:   "a-100",
			wantedErr: fmt.Errorf("cannot convert minimum value a to integer"),
		},
		"invalid maximum": {
			inRange:   "1-a",
			wantedErr: fmt.Errorf("cannot convert maximum value a to integer"),
		},
		"success": {
			inRange:   "1-10",
			wantedMin: 1,
			wantedMax: 10,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			gotMin, gotMax, err := IntRangeBand(tt.inRange).Parse()
			if tt.wantedErr != nil {
				require.EqualError(t, err, tt.wantedErr.Error())
				return
			}
			require.NoError(t, err)
			require.Equal(t, tt.wantedMin, gotMin)
			require.Equal(t, tt.wantedMax, gotMax)
		})
	}
}
// TestRange_Parse validates that both forms of Range — the "min-max" band
// string and the structured RangeConfig — yield the same parsed bounds.
func TestRange_Parse(t *testing.T) {
	band := IntRangeBand("1-10")
	tests := map[string]struct {
		input     Range
		wantedMin int
		wantedMax int
	}{
		"success with range value": {
			input:     Range{Value: &band},
			wantedMin: 1,
			wantedMax: 10,
		},
		"success with range config": {
			input: Range{
				RangeConfig: RangeConfig{
					Min: aws.Int(2),
					Max: aws.Int(8),
				},
			},
			wantedMin: 2,
			wantedMax: 8,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			gotMin, gotMax, err := tt.input.Parse()
			require.NoError(t, err)
			require.Equal(t, tt.wantedMin, gotMin)
			require.Equal(t, tt.wantedMax, gotMax)
		})
	}
}
// TestCount_Desired validates the desired task count derived from each Count
// form: a plain value, a spot count, and both autoscaling range variants
// (where the range minimum becomes the desired count).
func TestCount_Desired(t *testing.T) {
	band := IntRangeBand("1-10")
	tests := map[string]struct {
		input       *Count
		expected    *int
		expectedErr error
	}{
		"with value": {
			input:    &Count{Value: aws.Int(42)},
			expected: aws.Int(42),
		},
		"with spot count": {
			input:    &Count{AdvancedCount: AdvancedCount{Spot: aws.Int(31)}},
			expected: aws.Int(31),
		},
		"with autoscaling range on dedicated capacity": {
			input:    &Count{AdvancedCount: AdvancedCount{Range: Range{Value: &band}}},
			expected: aws.Int(1),
		},
		"with autoscaling range with spot capacity": {
			input: &Count{
				AdvancedCount: AdvancedCount{
					Range: Range{
						RangeConfig: RangeConfig{
							Min: aws.Int(5),
							Max: aws.Int(10),
						},
					},
				},
			},
			expected: aws.Int(5),
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			// WHEN
			actual, err := tt.input.Desired()
			// THEN
			if tt.expectedErr != nil {
				require.EqualError(t, err, tt.expectedErr.Error())
				return
			}
			require.NoError(t, err)
			require.Equal(t, tt.expected, actual)
		})
	}
}
// TestHealthCheckArgsOrString_IsEmpty validates the zero-value check for
// health checks configured via either the basic (string) or advanced
// (HTTPHealthCheckArgs) form of the union.
func TestHealthCheckArgsOrString_IsEmpty(t *testing.T) {
	testCases := map[string]struct {
		hc     HealthCheckArgsOrString
		wanted bool
	}{
		"should return true if there are no settings": {
			wanted: true,
		},
		"should return false if a path is set via the basic configuration": {
			hc: HealthCheckArgsOrString{
				Union: BasicToUnion[string, HTTPHealthCheckArgs]("/"),
			},
			wanted: false,
		},
		"should return false if a value is set via the advanced configuration": {
			// Previously this case duplicated the basic-configuration fixture,
			// so the advanced branch of the union was never exercised.
			hc: HealthCheckArgsOrString{
				Union: AdvancedToUnion[string, HTTPHealthCheckArgs](HTTPHealthCheckArgs{
					Path: aws.String("/"),
				}),
			},
			wanted: false,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.hc.IsZero())
		})
	}
}
// TestQueueScaling_IsEmpty validates that QueueScaling reports empty only
// when neither the processing time nor the acceptable latency is set.
func TestQueueScaling_IsEmpty(t *testing.T) {
	tests := map[string]struct {
		in     QueueScaling
		wanted bool
	}{
		"should return false if msg_processing_time is not nil": {
			in: QueueScaling{AvgProcessingTime: durationp(5 * time.Second)},
		},
		"should return false if acceptable_latency is not nil": {
			in: QueueScaling{AcceptableLatency: durationp(1 * time.Minute)},
		},
		"should return true if there are no fields set": {
			wanted: true,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tt.wanted, tt.in.IsEmpty())
		})
	}
}
// TestQueueScaling_AcceptableBacklogPerTask validates the backlog-per-task
// calculation (acceptable latency divided by average processing time,
// rounded up) and the error raised when the config is empty.
func TestQueueScaling_AcceptableBacklogPerTask(t *testing.T) {
	testCases := map[string]struct {
		in            QueueScaling
		wantedBacklog int
		wantedErr     error
	}{
		"should return an error if queue scaling is empty": {
			in:        QueueScaling{},
			wantedErr: errors.New(`"queue_delay" must be specified in order to calculate the acceptable backlog`),
		},
		"should round up to an integer if backlog number has a decimal": {
			in: QueueScaling{
				AcceptableLatency: durationp(10 * time.Second),
				AvgProcessingTime: durationp(300 * time.Millisecond),
			},
			wantedBacklog: 34, // 10s / 0.3s = 33.33, rounded up.
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := tc.in.AcceptableBacklogPerTask()
			if tc.wantedErr != nil {
				require.NotNil(t, err)
				return
			}
			// Previously the success branch never asserted err was nil.
			require.NoError(t, err)
			require.Equal(t, tc.wantedBacklog, actual)
		})
	}
}
// TestParsePortMapping validates splitting "port[/protocol]" strings into
// their port and protocol components.
func TestParsePortMapping(t *testing.T) {
	tests := map[string]struct {
		inPort         *string
		wantedPort     *string
		wantedProtocol *string
		wantedErr      error
	}{
		"error parsing port": {
			inPort:    stringP("1/2/3"),
			wantedErr: errors.New("cannot parse port mapping from 1/2/3"),
		},
		"no error if input is empty": {},
		"port number only": {
			inPort:     stringP("443"),
			wantedPort: stringP("443"),
		},
		"port and protocol": {
			inPort:         stringP("443/tcp"),
			wantedPort:     stringP("443"),
			wantedProtocol: stringP("tcp"),
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			gotPort, gotProtocol, err := ParsePortMapping(tt.inPort)
			if tt.wantedErr != nil {
				require.EqualError(t, err, tt.wantedErr.Error())
				return
			}
			require.NoError(t, err)
			require.Equal(t, gotPort, tt.wantedPort)
			require.Equal(t, gotProtocol, tt.wantedProtocol)
		})
	}
}
// TestLoadBalancedWebService_NetworkLoadBalancerTarget verifies how each NLB
// listener resolves its target container and target port: defaulting to the
// primary container and the listener port, honoring explicit overrides, and
// surfacing an error for malformed port mappings.
func TestLoadBalancedWebService_NetworkLoadBalancerTarget(t *testing.T) {
	testCases := map[string]struct {
		in                    LoadBalancedWebService
		wantedTargetContainer []string
		wantedTargetPort      []string
		wantedErr             error
	}{
		"should return primary container name/nlb port as targetContainer/targetPort in case targetContainer and targetPort is not given ": {
			in: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("foo"),
					Type: aws.String("Load Balanced WebService"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("80/tcp"),
						},
						AdditionalListeners: []NetworkLoadBalancerListener{
							{
								Port: aws.String("81/tcp"),
							},
						},
					},
				},
			},
			wantedTargetContainer: []string{"foo", "foo"},
			wantedTargetPort:      []string{"80", "81"},
		},
		"should return targetContainer and targetPort as is if they are given ": {
			in: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("foo"),
					Type: aws.String("Load Balanced WebService"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:            aws.String("80/tcp"),
							TargetPort:      aws.Int(81),
							TargetContainer: aws.String("bar"),
						},
						AdditionalListeners: []NetworkLoadBalancerListener{
							{
								Port:            aws.String("82/tcp"),
								TargetPort:      aws.Int(83),
								TargetContainer: aws.String("nginx"),
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"bar": {
							Port: aws.String("8080"),
						},
						"nginx": {
							Port: aws.String("8081"),
						},
					},
				},
			},
			wantedTargetContainer: []string{"bar", "nginx"},
			wantedTargetPort:      []string{"81", "83"},
		},
		"should return error if targetPort is of incorrect type": {
			in: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("foo"),
					Type: aws.String("Load Balanced WebService"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("80/80/80"),
						},
					},
				},
			},
			wantedErr: errors.New(`cannot parse port mapping from 80/80/80`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// The error from ExposedPorts is deliberately discarded: the
			// malformed-port case is expected to surface via listener.Target
			// below. NOTE(review): confirm ExposedPorts cannot fail in a way
			// Target would not also report.
			exposedPorts, _ := tc.in.ExposedPorts()
			for idx, listener := range tc.in.NLBConfig.NLBListeners() {
				targetContainer, targetPort, err := listener.Target(exposedPorts)
				if tc.wantedErr != nil {
					require.EqualError(t, err, tc.wantedErr.Error())
				} else {
					require.Equal(t, tc.wantedTargetContainer[idx], targetContainer)
					require.Equal(t, tc.wantedTargetPort[idx], targetPort)
				}
			}
		})
	}
}
| 1,035 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/imdario/mergo"
)
// fmtExclusiveFieldsSpecifiedTogether is the error format used when two mutually
// exclusive manifest fields are both set. Args: the first field's name, a linking
// verb (e.g. "is"), and the conflicting field's name.
var fmtExclusiveFieldsSpecifiedTogether = "invalid manifest: %s %s mutually exclusive with %s and shouldn't be specified at the same time"

// defaultTransformers is the ordered set of mergo transformers applied when
// overriding a base manifest with environment-specific values.
var defaultTransformers = []mergo.Transformers{
	// NOTE: basicTransformer needs to be used before the rest of the custom transformers, because the other transformers
	// do not merge anything - they just unset the fields that do not get specified in source manifest.
	basicTransformer{},
	imageTransformer{},
	buildArgsOrStringTransformer{},
	aliasTransformer{},
	stringSliceOrStringTransformer{},
	platformArgsOrStringTransformer{},
	securityGroupsIDsOrConfigTransformer{},
	serviceConnectTransformer{},
	placementArgOrStringTransformer{},
	subnetListOrArgsTransformer{},
	unionTransformer{},
	countTransformer{},
	advancedCountTransformer{},
	scalingConfigOrTTransformer[Percentage]{},
	scalingConfigOrTTransformer[int]{},
	scalingConfigOrTTransformer[time.Duration]{},
	rangeTransformer{},
	efsConfigOrBoolTransformer{},
	efsVolumeConfigurationTransformer{},
	sqsQueueOrBoolTransformer{},
	httpOrBoolTransformer{},
	secretTransformer{},
	environmentCDNConfigTransformer{},
}
// See a complete list of `reflect.Kind` here: https://pkg.go.dev/reflect#Kind.
// basicKinds lists the kinds whose pointer fields basicTransformer merges by
// simple overwrite instead of recursive field-by-field merging.
var basicKinds = []reflect.Kind{
	reflect.Bool,
	reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
	reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
	reflect.Float32, reflect.Float64,
	reflect.Complex64, reflect.Complex128,
	reflect.Array, reflect.String, reflect.Slice,
}
type imageTransformer struct{}

// Transformer provides custom logic to transform an Image.
// `build` and `location` are mutually exclusive: the member set in the
// override unsets the competing member inherited from the base manifest.
func (t imageTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(Image{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(Image), src.Interface().(Image)
		if !override.Build.isEmpty() && override.Location != nil {
			return fmt.Errorf(fmtExclusiveFieldsSpecifiedTogether, "image.build", "is", "image.location")
		}
		if !override.Build.isEmpty() {
			merged.Location = nil
		}
		if override.Location != nil {
			merged.Build = BuildArgsOrString{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type buildArgsOrStringTransformer struct{}

// Transformer returns custom merge logic for BuildArgsOrString's fields.
// The string form and the args form are mutually exclusive; whichever the
// override specifies clears the other in the merged result.
func (t buildArgsOrStringTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(BuildArgsOrString{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(BuildArgsOrString), src.Interface().(BuildArgsOrString)
		if !override.BuildArgs.isEmpty() {
			merged.BuildString = nil
		}
		if override.BuildString != nil {
			merged.BuildArgs = DockerBuildArgs{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type aliasTransformer struct{}

// Transformer returns custom merge logic for Alias's fields.
// Advanced aliases and the string/slice form are mutually exclusive; the
// Convert calls let this handle named types based on Alias.
func (t aliasTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(Alias{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		aliasType := reflect.TypeOf(Alias{})
		merged := dst.Convert(aliasType).Interface().(Alias)
		override := src.Convert(aliasType).Interface().(Alias)
		if len(override.AdvancedAliases) != 0 {
			merged.StringSliceOrString = StringSliceOrString{}
		}
		if !override.StringSliceOrString.isEmpty() {
			merged.AdvancedAliases = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged).Convert(typ))
		return nil
	}
}
type stringSliceOrStringTransformer struct{}

// Transformer returns custom merge logic for StringSliceOrString's fields.
// Applies to any type convertible to StringSliceOrString; the single-string
// and slice members are mutually exclusive.
func (t stringSliceOrStringTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if !typ.ConvertibleTo(reflect.TypeOf(StringSliceOrString{})) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		unionType := reflect.TypeOf(StringSliceOrString{})
		merged := dst.Convert(unionType).Interface().(StringSliceOrString)
		override := src.Convert(unionType).Interface().(StringSliceOrString)
		if override.String != nil {
			merged.StringSlice = nil
		}
		if override.StringSlice != nil {
			merged.String = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged).Convert(typ))
		return nil
	}
}
type platformArgsOrStringTransformer struct{}

// Transformer returns custom merge logic for PlatformArgsOrString's fields.
// The platform string and structured args are mutually exclusive.
func (t platformArgsOrStringTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(PlatformArgsOrString{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(PlatformArgsOrString), src.Interface().(PlatformArgsOrString)
		if override.PlatformString != nil {
			merged.PlatformArgs = PlatformArgs{}
		}
		if !override.PlatformArgs.isEmpty() {
			merged.PlatformString = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type securityGroupsIDsOrConfigTransformer struct{}

// Transformer returns custom merge logic for SecurityGroupsIDsOrConfig's fields.
// A plain ID list and the advanced config are mutually exclusive.
func (s securityGroupsIDsOrConfigTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(SecurityGroupsIDsOrConfig{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(SecurityGroupsIDsOrConfig), src.Interface().(SecurityGroupsIDsOrConfig)
		if !override.AdvancedConfig.isEmpty() {
			merged.IDs = nil
		}
		if override.IDs != nil {
			merged.AdvancedConfig = SecurityGroupsConfig{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type placementArgOrStringTransformer struct{}

// Transformer returns custom merge logic for PlacementArgOrString's fields.
// The placement string and structured args are mutually exclusive.
func (t placementArgOrStringTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(PlacementArgOrString{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(PlacementArgOrString), src.Interface().(PlacementArgOrString)
		if override.PlacementString != nil {
			merged.PlacementArgs = PlacementArgs{}
		}
		if !override.PlacementArgs.isEmpty() {
			merged.PlacementString = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type serviceConnectTransformer struct{}

// Transformer returns custom merge logic for ServiceConnectBoolOrArgs' fields.
// The boolean toggle and the structured args are mutually exclusive.
func (t serviceConnectTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(ServiceConnectBoolOrArgs{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(ServiceConnectBoolOrArgs), src.Interface().(ServiceConnectBoolOrArgs)
		if override.EnableServiceConnect != nil {
			merged.ServiceConnectArgs = ServiceConnectArgs{}
		}
		if !override.ServiceConnectArgs.isEmpty() {
			merged.EnableServiceConnect = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type subnetListOrArgsTransformer struct{}

// Transformer returns custom merge logic for SubnetListOrArgs' fields.
// A plain subnet ID list and the structured args are mutually exclusive.
func (t subnetListOrArgsTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(SubnetListOrArgs{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(SubnetListOrArgs), src.Interface().(SubnetListOrArgs)
		if len(override.IDs) != 0 {
			merged.SubnetArgs = SubnetArgs{}
		}
		if !override.SubnetArgs.isEmpty() {
			merged.IDs = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type unionTransformer struct{}

// unionPrefix is the type-name prefix shared by every instantiation of the
// generic Union type (the text before "[" in e.g. "manifest.Union[string,bool]").
var unionPrefix, _, _ = strings.Cut(reflect.TypeOf(Union[any, any]{}).String(), "[")

// Transformer returns custom merge logic for union types.
func (t unionTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	// :sweat_smile: https://github.com/golang/go/issues/54393
	// reflect currently doesn't have support for getting type parameters
	// or checking if a type is a non-specific instantiation of a generic type
	// (i.e., no way to tell if the type Union[string, bool] is a Union)
	isUnion := strings.HasPrefix(typ.String(), unionPrefix)
	if !isUnion {
		return nil
	}
	return func(dst, src reflect.Value) (err error) {
		defer func() {
			// should realistically never happen unless Union type code has been
			// refactored to change functions called via reflection.
			if r := recover(); r != nil {
				err = fmt.Errorf("override union: %v", r)
			}
		}()
		// Methods are looked up by name because the concrete Union
		// instantiation isn't known at compile time here.
		isBasic := src.MethodByName("IsBasic").Call(nil)[0].Bool()
		isAdvanced := src.MethodByName("IsAdvanced").Call(nil)[0].Bool()

		// Call SetType with the correct type based on src's type.
		// We use the value from dst because it holds the merged value.
		if isBasic {
			if dst.CanAddr() {
				dst.Addr().MethodByName("SetBasic").Call([]reflect.Value{dst.FieldByName("Basic")})
			}
		} else if isAdvanced {
			if dst.CanAddr() {
				dst.Addr().MethodByName("SetAdvanced").Call([]reflect.Value{dst.FieldByName("Advanced")})
			}
		}
		return nil
	}
}
type countTransformer struct{}

// Transformer returns custom merge logic for Count's fields.
// A literal count value and the advanced autoscaling config are mutually
// exclusive.
func (t countTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(Count{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(Count), src.Interface().(Count)
		if !override.AdvancedCount.IsEmpty() {
			merged.Value = nil
		}
		if override.Value != nil {
			merged.AdvancedCount = AdvancedCount{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type advancedCountTransformer struct{}

// Transformer returns custom merge logic for AdvancedCount's fields.
// A fixed spot count and autoscaling settings are mutually exclusive.
func (t advancedCountTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(AdvancedCount{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(AdvancedCount), src.Interface().(AdvancedCount)
		if override.Spot != nil {
			merged.unsetAutoscaling()
		}
		if override.hasAutoscaling() {
			merged.Spot = nil
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type scalingConfigOrTTransformer[T ~int | time.Duration] struct{}

// Transformer returns custom merge logic for ScalingConfigOrT's fields.
// The plain value and the advanced scaling config are mutually exclusive.
func (t scalingConfigOrTTransformer[T]) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(ScalingConfigOrT[T]{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(ScalingConfigOrT[T]), src.Interface().(ScalingConfigOrT[T])
		if !override.ScalingConfig.IsEmpty() {
			merged.Value = nil
		}
		if override.Value != nil {
			merged.ScalingConfig = AdvancedScalingConfig[T]{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type rangeTransformer struct{}

// Transformer returns custom merge logic for Range's fields.
// The "min-max" band string and the structured range config are mutually
// exclusive.
func (t rangeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(Range{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(Range), src.Interface().(Range)
		if !override.RangeConfig.IsEmpty() {
			merged.Value = nil
		}
		if override.Value != nil {
			merged.RangeConfig = RangeConfig{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type efsConfigOrBoolTransformer struct{}

// Transformer returns custom merge logic for EFSConfigOrBool's fields.
// The enabled toggle and the advanced volume config are mutually exclusive.
func (t efsConfigOrBoolTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(EFSConfigOrBool{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(EFSConfigOrBool), src.Interface().(EFSConfigOrBool)
		if !override.Advanced.IsEmpty() {
			merged.Enabled = nil
		}
		if override.Enabled != nil {
			merged.Advanced = EFSVolumeConfiguration{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type efsVolumeConfigurationTransformer struct{}

// Transformer returns custom merge logic for EFSVolumeConfiguration's fields.
// The managed (UID) and bring-your-own (BYO) configurations are mutually
// exclusive groups of fields.
func (t efsVolumeConfigurationTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(EFSVolumeConfiguration{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(EFSVolumeConfiguration), src.Interface().(EFSVolumeConfiguration)
		if !override.EmptyUIDConfig() {
			merged.unsetBYOConfig()
		}
		if !override.EmptyBYOConfig() {
			merged.unsetUIDConfig()
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type sqsQueueOrBoolTransformer struct{}

// Transformer returns custom merge logic for SQSQueueOrBool's fields.
// The enabled toggle and the advanced queue config are mutually exclusive.
func (q sqsQueueOrBoolTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(SQSQueueOrBool{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(SQSQueueOrBool), src.Interface().(SQSQueueOrBool)
		if !override.Advanced.IsEmpty() {
			merged.Enabled = nil
		}
		if override.Enabled != nil {
			merged.Advanced = SQSQueue{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type httpOrBoolTransformer struct{}

// Transformer returns custom merge logic for HTTPOrBool's fields.
// The enabled toggle and the structured HTTP config are mutually exclusive.
func (t httpOrBoolTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(HTTPOrBool{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(HTTPOrBool), src.Interface().(HTTPOrBool)
		if !override.HTTP.IsEmpty() {
			merged.Enabled = nil
		}
		if override.Enabled != nil {
			merged.HTTP = HTTP{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type secretTransformer struct{}

// Transformer returns custom merge logic for Secret's fields.
// A Secrets Manager reference and a plain "from" reference are mutually
// exclusive.
func (t secretTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(Secret{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(Secret), src.Interface().(Secret)
		if !override.fromSecretsManager.IsEmpty() {
			merged.from = stringOrFromCFN{}
		}
		if !override.from.isEmpty() {
			merged.fromSecretsManager = secretsManagerSecret{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type environmentCDNConfigTransformer struct{}

// Transformer returns custom merge logic for EnvironmentCDNConfig's fields.
// The enabled toggle and the advanced CDN config are mutually exclusive.
func (t environmentCDNConfigTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(EnvironmentCDNConfig{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		merged, override := dst.Interface().(EnvironmentCDNConfig), src.Interface().(EnvironmentCDNConfig)
		if !override.Config.isEmpty() {
			merged.Enabled = nil
		}
		if override.Enabled != nil {
			merged.Config = AdvancedCDNConfig{}
		}
		if !dst.CanSet() { // Extra safety to prevent panicking.
			return nil
		}
		dst.Set(reflect.ValueOf(merged))
		return nil
	}
}
type basicTransformer struct{}

// Transformer returns overwrite-merge logic for slices and for pointers to
// basic kinds (see basicKinds); other types fall through to default merging.
func (t basicTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ.Kind() == reflect.Slice {
		return transformPBasicOrSlice
	}
	if typ.Kind() != reflect.Ptr {
		return nil
	}
	elemKind := typ.Elem().Kind()
	for _, k := range basicKinds {
		if elemKind == k {
			return transformPBasicOrSlice
		}
	}
	return nil
}
func transformPBasicOrSlice(dst, src reflect.Value) error {
// This condition shouldn't ever be true. It's merely here for extra safety so that `src.IsNil` won't panic.
if src.Kind() != reflect.Ptr && src.Kind() != reflect.Slice {
return nil
}
if src.IsNil() {
return nil
}
if dst.CanSet() {
dst.Set(src)
}
return nil
}
| 625 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/imdario/mergo"
"github.com/stretchr/testify/require"
)
// TestBasicTransformer_Transformer verifies the overwrite semantics for
// pointer-to-basic and slice fields: a non-nil override wins (including
// explicit zero values), while a nil override preserves the original.
func TestBasicTransformer_Transformer(t *testing.T) {
	type testBasicTransformerStruct struct {
		PBool   *bool
		PInt    *int
		PInt64  *int64
		PUint16 *uint16
		PUint32 *uint32
		PString *string
		PSlice  *[]string
		Slice   []string
	}
	testCases := map[string]struct {
		original func(s *testBasicTransformerStruct)
		override func(s *testBasicTransformerStruct)
		wanted   func(s *testBasicTransformerStruct)
	}{
		"overridden": {
			original: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(false)
				s.PInt = aws.Int(24)
				s.PInt64 = aws.Int64(24)
				s.PUint16 = aws.Uint16(24)
				s.PUint32 = aws.Uint32(24)
				s.PString = aws.String("horse")
				s.Slice = []string{"horses", "run"}
				mockSlice := []string{"horses", "run"}
				s.PSlice = &mockSlice
			},
			override: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(true)
				s.PInt = aws.Int(42)
				s.PInt64 = aws.Int64(42)
				s.PUint16 = aws.Uint16(42)
				s.PUint32 = aws.Uint32(42)
				s.PString = aws.String("pony")
				s.Slice = []string{"pony", "run"}
				mockSlice := []string{"pony", "run"}
				s.PSlice = &mockSlice
			},
			wanted: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(true)
				s.PInt = aws.Int(42)
				s.PInt64 = aws.Int64(42)
				s.PUint16 = aws.Uint16(42)
				s.PUint32 = aws.Uint32(42)
				s.PString = aws.String("pony")
				s.Slice = []string{"pony", "run"}
				mockSlice := []string{"pony", "run"}
				s.PSlice = &mockSlice
			},
		},
		"explicitly overridden by zero value": {
			original: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(true)
				s.PInt = aws.Int(24)
				s.PInt64 = aws.Int64(24)
				s.PUint16 = aws.Uint16(24)
				s.PUint32 = aws.Uint32(24)
				s.PString = aws.String("horse")
				s.Slice = []string{"horses", "run"}
				mockSlice := []string{"horses", "run"}
				s.PSlice = &mockSlice
			},
			override: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(false)
				s.PInt = aws.Int(0)
				s.PInt64 = aws.Int64(0)
				s.PUint16 = aws.Uint16(0)
				s.PUint32 = aws.Uint32(0)
				s.PString = aws.String("")
				s.Slice = []string{}
				var mockSlice []string
				s.PSlice = &mockSlice
			},
			wanted: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(false)
				s.PInt = aws.Int(0)
				s.PInt64 = aws.Int64(0)
				s.PUint16 = aws.Uint16(0)
				s.PUint32 = aws.Uint32(0)
				s.PString = aws.String("")
				s.Slice = []string{}
				var mockSlice []string
				s.PSlice = &mockSlice
			},
		},
		"not overridden by nil": {
			original: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(true)
				s.PInt = aws.Int(24)
				s.PInt64 = aws.Int64(24)
				s.PUint16 = aws.Uint16(24)
				s.PUint32 = aws.Uint32(24)
				s.PString = aws.String("horse")
				s.Slice = []string{"horses", "run"}
				mockSlice := []string{"horses", "run"}
				s.PSlice = &mockSlice
			},
			override: func(s *testBasicTransformerStruct) {},
			wanted: func(s *testBasicTransformerStruct) {
				s.PBool = aws.Bool(true)
				s.PInt = aws.Int(24)
				s.PInt64 = aws.Int64(24)
				s.PUint16 = aws.Uint16(24)
				s.PUint32 = aws.Uint32(24)
				s.PString = aws.String("horse")
				s.Slice = []string{"horses", "run"}
				mockSlice := []string{"horses", "run"}
				s.PSlice = &mockSlice
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// Build the destination, override, and expected structs from the fixtures.
			var dst, override, wanted testBasicTransformerStruct
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			err := mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(basicTransformer{}))
			require.NoError(t, err)
			require.Equal(t, wanted, dst)
		})
	}
}
// TestImageTransformer_Transformer verifies that overriding `image.location`
// unsets `image.build` and vice versa after merging manifests.
func TestImageTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(i *Image)
		override func(i *Image)
		wanted   func(i *Image)
	}{
		"build set to empty if location is not nil": {
			original: func(i *Image) {
				i.Build = BuildArgsOrString{
					BuildString: aws.String("mockBuild"),
				}
			},
			override: func(i *Image) {
				i.Location = aws.String("mockLocation")
			},
			wanted: func(i *Image) {
				i.Location = aws.String("mockLocation")
				i.Build = BuildArgsOrString{
					BuildString: nil,
					BuildArgs:   DockerBuildArgs{},
				}
			},
		},
		"location set to empty if build is not nil": {
			original: func(i *Image) {
				i.Location = aws.String("mockLocation")
			},
			override: func(i *Image) {
				i.Build = BuildArgsOrString{
					BuildArgs: DockerBuildArgs{
						Dockerfile: aws.String("mockDockerfile"),
					},
				}
			},
			wanted: func(i *Image) {
				i.Location = nil
				i.Build = BuildArgsOrString{
					BuildString: nil,
					BuildArgs: DockerBuildArgs{
						Dockerfile: aws.String("mockDockerfile"),
					},
				}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted Image
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)

			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)

			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(imageTransformer{}))
			require.NoError(t, err) // Previously asserted twice by accident.

			require.Equal(t, wanted, dst)
		})
	}
}
// TestBuildArgsOrStringTransformer_Transformer verifies that the string and
// args forms of `image.build` unset one another when overridden.
func TestBuildArgsOrStringTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(b *BuildArgsOrString)
		override func(b *BuildArgsOrString)
		wanted   func(b *BuildArgsOrString)
	}{
		"build string set to empty if build args is not nil": {
			original: func(b *BuildArgsOrString) {
				b.BuildString = aws.String("mockBuild")
			},
			override: func(b *BuildArgsOrString) {
				b.BuildArgs = DockerBuildArgs{
					Context:    aws.String("mockContext"),
					Dockerfile: aws.String("mockDockerfile"),
				}
			},
			wanted: func(b *BuildArgsOrString) {
				b.BuildString = nil
				b.BuildArgs = DockerBuildArgs{
					Context:    aws.String("mockContext"),
					Dockerfile: aws.String("mockDockerfile"),
				}
			},
		},
		"build args set to empty if build string is not nil": {
			original: func(b *BuildArgsOrString) {
				b.BuildArgs = DockerBuildArgs{
					Context:    aws.String("mockContext"),
					Dockerfile: aws.String("mockDockerfile"),
				}
			},
			override: func(b *BuildArgsOrString) {
				b.BuildString = aws.String("mockBuild")
			},
			wanted: func(b *BuildArgsOrString) {
				b.BuildString = aws.String("mockBuild")
				b.BuildArgs = DockerBuildArgs{}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted BuildArgsOrString
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)

			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)

			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(buildArgsOrStringTransformer{}))
			require.NoError(t, err) // Previously asserted twice by accident.

			require.Equal(t, wanted, dst)
		})
	}
}
// TestAliasTransformer_Transformer verifies that merging with aliasTransformer
// keeps only one alias representation: an override with advanced aliases clears
// the string-slice form, and vice versa.
func TestAliasTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(*Alias)
		override func(*Alias)
		wanted   func(*Alias)
	}{
		"advanced alias set to empty if string slice is not nil": {
			original: func(a *Alias) {
				a.AdvancedAliases = []AdvancedAlias{
					{
						Alias: aws.String("mockAlias"),
					},
				}
			},
			override: func(a *Alias) {
				a.StringSliceOrString = StringSliceOrString{
					StringSlice: []string{"mock", "string", "slice"},
				}
			},
			wanted: func(a *Alias) {
				a.StringSliceOrString.StringSlice = []string{"mock", "string", "slice"}
			},
		},
		"StringSliceOrString set to empty if advanced alias is not nil": {
			original: func(a *Alias) {
				a.StringSliceOrString = StringSliceOrString{
					StringSlice: []string{"mock", "string", "slice"},
				}
			},
			override: func(a *Alias) {
				a.AdvancedAliases = []AdvancedAlias{
					{
						Alias: aws.String("mockAlias"),
					},
				}
			},
			wanted: func(a *Alias) {
				a.AdvancedAliases = []AdvancedAlias{
					{
						Alias: aws.String("mockAlias"),
					},
				}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted Alias
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(aliasTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestStringSliceOrStringTransformer_Transformer verifies that merging with
// stringSliceOrStringTransformer keeps only the form (string vs. slice) that
// the override sets, zeroing the other.
func TestStringSliceOrStringTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(s *StringSliceOrString)
		override func(s *StringSliceOrString)
		wanted   func(s *StringSliceOrString)
	}{
		"string set to empty if string slice is not nil": {
			original: func(s *StringSliceOrString) {
				s.String = aws.String("mockString")
			},
			override: func(s *StringSliceOrString) {
				s.StringSlice = []string{"mock", "string", "slice"}
			},
			wanted: func(s *StringSliceOrString) {
				s.StringSlice = []string{"mock", "string", "slice"}
			},
		},
		"string slice set to empty if string is not nil": {
			original: func(s *StringSliceOrString) {
				s.StringSlice = []string{"mock", "string", "slice"}
			},
			override: func(s *StringSliceOrString) {
				s.String = aws.String("mockString")
			},
			wanted: func(s *StringSliceOrString) {
				s.String = aws.String("mockString")
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted StringSliceOrString
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(stringSliceOrStringTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestPlatformArgsOrStringTransformer_Transformer verifies that merging with
// platformArgsOrStringTransformer keeps only one platform form: the override's
// string or args wins and the other form is zeroed.
func TestPlatformArgsOrStringTransformer_Transformer(t *testing.T) {
	mockPlatformStr := PlatformString("mockString")
	testCases := map[string]struct {
		original func(p *PlatformArgsOrString)
		override func(p *PlatformArgsOrString)
		wanted   func(p *PlatformArgsOrString)
	}{
		"string set to empty if args is not nil": {
			original: func(p *PlatformArgsOrString) {
				p.PlatformString = &mockPlatformStr
			},
			override: func(p *PlatformArgsOrString) {
				p.PlatformArgs = PlatformArgs{
					OSFamily: aws.String("mock"),
					Arch:     aws.String("platformTest"),
				}
			},
			wanted: func(p *PlatformArgsOrString) {
				p.PlatformArgs = PlatformArgs{
					OSFamily: aws.String("mock"),
					Arch:     aws.String("platformTest"),
				}
			},
		},
		"args set to empty if string is not nil": {
			original: func(p *PlatformArgsOrString) {
				p.PlatformArgs = PlatformArgs{
					OSFamily: aws.String("mock"),
					Arch:     aws.String("platformTest"),
				}
			},
			override: func(p *PlatformArgsOrString) {
				p.PlatformString = &mockPlatformStr
			},
			wanted: func(p *PlatformArgsOrString) {
				p.PlatformString = &mockPlatformStr
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted PlatformArgsOrString
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(platformArgsOrStringTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestPlacementArgsOrStringTransformer_Transformer verifies that merging with
// placementArgOrStringTransformer keeps only the placement form (string vs.
// args) that the override sets.
func TestPlacementArgsOrStringTransformer_Transformer(t *testing.T) {
	mockPlacementStr := PlacementString("mockString")
	testCases := map[string]struct {
		original func(p *PlacementArgOrString)
		override func(p *PlacementArgOrString)
		wanted   func(p *PlacementArgOrString)
	}{
		"string set to empty if args is not nil": {
			original: func(p *PlacementArgOrString) {
				p.PlacementString = &mockPlacementStr
			},
			override: func(p *PlacementArgOrString) {
				p.PlacementArgs = PlacementArgs{
					Subnets: SubnetListOrArgs{
						IDs: []string{"id1"},
					},
				}
			},
			wanted: func(p *PlacementArgOrString) {
				p.PlacementArgs = PlacementArgs{
					Subnets: SubnetListOrArgs{
						IDs: []string{"id1"},
					},
				}
			},
		},
		"args set to empty if string is not nil": {
			original: func(p *PlacementArgOrString) {
				p.PlacementArgs = PlacementArgs{
					Subnets: SubnetListOrArgs{
						IDs: []string{"id1"},
					},
				}
			},
			override: func(p *PlacementArgOrString) {
				p.PlacementString = &mockPlacementStr
			},
			wanted: func(p *PlacementArgOrString) {
				p.PlacementString = &mockPlacementStr
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted PlacementArgOrString
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(placementArgOrStringTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestSubnetListOrArgsTransformer_Transformer verifies that merging with
// subnetListOrArgsTransformer keeps only the subnet form (ID list vs. args)
// that the override sets.
func TestSubnetListOrArgsTransformer_Transformer(t *testing.T) {
	mockSubnetIDs := []string{"id1", "id2"}
	mockSubnetFromTags := map[string]StringSliceOrString{
		"foo": {
			String: aws.String("bar"),
		},
	}
	testCases := map[string]struct {
		original func(p *SubnetListOrArgs)
		override func(p *SubnetListOrArgs)
		wanted   func(p *SubnetListOrArgs)
	}{
		"string slice set to empty if args is not nil": {
			original: func(s *SubnetListOrArgs) {
				s.IDs = mockSubnetIDs
			},
			override: func(s *SubnetListOrArgs) {
				s.SubnetArgs = SubnetArgs{
					FromTags: mockSubnetFromTags,
				}
			},
			wanted: func(s *SubnetListOrArgs) {
				s.SubnetArgs = SubnetArgs{
					FromTags: mockSubnetFromTags,
				}
			},
		},
		"args set to empty if string is not nil": {
			original: func(s *SubnetListOrArgs) {
				s.SubnetArgs = SubnetArgs{
					FromTags: mockSubnetFromTags,
				}
			},
			override: func(s *SubnetListOrArgs) {
				s.IDs = mockSubnetIDs
			},
			wanted: func(s *SubnetListOrArgs) {
				s.IDs = mockSubnetIDs
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted SubnetListOrArgs
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(subnetListOrArgsTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestServiceConnectTransformer_Transformer verifies that merging with
// serviceConnectTransformer keeps only the service-connect form (bool vs.
// args) that the override sets.
func TestServiceConnectTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(p *ServiceConnectBoolOrArgs)
		override func(p *ServiceConnectBoolOrArgs)
		wanted   func(p *ServiceConnectBoolOrArgs)
	}{
		"bool set to empty if args is not nil": {
			original: func(s *ServiceConnectBoolOrArgs) {
				s.EnableServiceConnect = aws.Bool(false)
			},
			override: func(s *ServiceConnectBoolOrArgs) {
				s.ServiceConnectArgs = ServiceConnectArgs{
					Alias: aws.String("api"),
				}
			},
			wanted: func(s *ServiceConnectBoolOrArgs) {
				s.ServiceConnectArgs = ServiceConnectArgs{
					Alias: aws.String("api"),
				}
			},
		},
		"args set to empty if bool is not nil": {
			original: func(s *ServiceConnectBoolOrArgs) {
				s.ServiceConnectArgs = ServiceConnectArgs{
					Alias: aws.String("api"),
				}
			},
			override: func(s *ServiceConnectBoolOrArgs) {
				s.EnableServiceConnect = aws.Bool(true)
			},
			wanted: func(s *ServiceConnectBoolOrArgs) {
				s.EnableServiceConnect = aws.Bool(true)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted ServiceConnectBoolOrArgs
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(serviceConnectTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// unionTransformerTest describes one merge scenario for the union
// transformer: override is merged on top of original, and the result
// must equal expected.
type unionTransformerTest[Basic, Advanced any] struct {
	original Union[Basic, Advanced] // merge destination (starting value).
	override Union[Basic, Advanced] // value merged on top of original.
	expected Union[Basic, Advanced] // wanted result after merging.
}
// TestTransformer_Generic checks that the union transformer always adopts the
// override's variant (Basic or Advanced), including when the original is unset.
func TestTransformer_Generic(t *testing.T) {
	runUnionTransformerTests(t, map[string]unionTransformerTest[any, any]{
		"switches to Simple from Advanced if overridden": {
			original: AdvancedToUnion[any, any](nil),
			override: BasicToUnion[any, any](nil),
			expected: BasicToUnion[any, any](nil),
		},
		"switches to Advanced from Simple if overridden": {
			original: BasicToUnion[any, any](nil),
			override: AdvancedToUnion[any, any](nil),
			expected: AdvancedToUnion[any, any](nil),
		},
		"switches to Simple if original unset": {
			original: Union[any, any]{},
			override: BasicToUnion[any, any](nil),
			expected: BasicToUnion[any, any](nil),
		},
		"switches to Advanced if original unset": {
			original: Union[any, any]{},
			override: AdvancedToUnion[any, any](nil),
			expected: AdvancedToUnion[any, any](nil),
		},
	})
}
// TestTransformer_StringOrHealthCheckArgs exercises the union transformer with
// a concrete Basic/Advanced pair: switching between string and args forms
// replaces the value wholesale, while same-form overrides merge field by field.
func TestTransformer_StringOrHealthCheckArgs(t *testing.T) {
	runUnionTransformerTests(t, map[string]unionTransformerTest[string, HTTPHealthCheckArgs]{
		"string unset if args set": {
			original: BasicToUnion[string, HTTPHealthCheckArgs]("mockPath"),
			override: AdvancedToUnion[string](HTTPHealthCheckArgs{
				Path:         aws.String("mockPathArgs"),
				SuccessCodes: aws.String("200"),
			}),
			expected: AdvancedToUnion[string](HTTPHealthCheckArgs{
				Path:         aws.String("mockPathArgs"),
				SuccessCodes: aws.String("200"),
			}),
		},
		"args unset if string set": {
			original: AdvancedToUnion[string](HTTPHealthCheckArgs{
				Path:         aws.String("mockPathArgs"),
				SuccessCodes: aws.String("200"),
			}),
			override: BasicToUnion[string, HTTPHealthCheckArgs]("mockPath"),
			expected: BasicToUnion[string, HTTPHealthCheckArgs]("mockPath"),
		},
		"string merges correctly": {
			original: BasicToUnion[string, HTTPHealthCheckArgs]("path"),
			override: BasicToUnion[string, HTTPHealthCheckArgs]("newPath"),
			expected: BasicToUnion[string, HTTPHealthCheckArgs]("newPath"),
		},
		"args merge correctly": {
			original: AdvancedToUnion[string](HTTPHealthCheckArgs{
				Path:             aws.String("mockPathArgs"),
				SuccessCodes:     aws.String("200"),
				HealthyThreshold: aws.Int64(10),
			}),
			override: AdvancedToUnion[string](HTTPHealthCheckArgs{
				SuccessCodes:       aws.String("420"),
				UnhealthyThreshold: aws.Int64(20),
			}),
			expected: AdvancedToUnion[string](HTTPHealthCheckArgs{
				Path:               aws.String("mockPathArgs"), // merged unchanged
				SuccessCodes:       aws.String("420"),          // updated
				HealthyThreshold:   aws.Int64(10),              // comes from original
				UnhealthyThreshold: aws.Int64(20),              // comes from override
			}),
		},
	})
}
// runUnionTransformerTests merges each case's override on top of its original,
// first with mergo's defaults and then with the unionTransformer, and asserts
// the final value equals the case's expected value.
func runUnionTransformerTests[Basic, Advanced any](t *testing.T, tests map[string]unionTransformerTest[Basic, Advanced]) {
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			// Perform default merge.
			err := mergo.Merge(&tc.original, tc.override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&tc.original, tc.override, mergo.WithOverride, mergo.WithTransformers(unionTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, tc.expected, tc.original)
		})
	}
}
// TestUnionPanicRecover ensures the union transformer converts an internal
// reflection panic into a returned error rather than crashing, by feeding it
// a look-alike Union type that lacks the real type's methods.
func TestUnionPanicRecover(t *testing.T) {
	// trick the transformer logic into thinking
	// this is the real manifest.Union type
	type Union[T any] struct{}
	err := mergo.Merge(&Union[any]{}, &Union[any]{}, mergo.WithTransformers(unionTransformer{}))
	require.EqualError(t, err, "override union: reflect: call of reflect.Value.Call on zero Value")
}
// TestCountTransformer_Transformer verifies that merging with countTransformer
// keeps only the count form (plain value vs. advanced count) that the
// override sets.
func TestCountTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(c *Count)
		override func(c *Count)
		wanted   func(c *Count)
	}{
		"value set to empty if advanced count is not nil": {
			original: func(c *Count) {
				c.Value = aws.Int(24)
			},
			override: func(c *Count) {
				c.AdvancedCount = AdvancedCount{
					Spot: aws.Int(42),
				}
			},
			wanted: func(c *Count) {
				c.AdvancedCount = AdvancedCount{
					Spot: aws.Int(42),
				}
			},
		},
		"advanced count set to empty if value is not nil": {
			original: func(c *Count) {
				c.AdvancedCount = AdvancedCount{
					Spot: aws.Int(42),
				}
			},
			override: func(c *Count) {
				c.Value = aws.Int(24)
			},
			wanted: func(c *Count) {
				c.Value = aws.Int(24)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted Count
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(countTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestAdvancedCountTransformer_Transformer verifies that merging with
// advancedCountTransformer keeps spot and autoscaling configuration mutually
// exclusive: the override's form wins and the other is zeroed.
func TestAdvancedCountTransformer_Transformer(t *testing.T) {
	perc := Percentage(80)
	mockConfig := ScalingConfigOrT[Percentage]{
		Value: &perc,
	}
	mockReq := ScalingConfigOrT[int]{
		Value: aws.Int(42),
	}
	testCases := map[string]struct {
		original func(a *AdvancedCount)
		override func(a *AdvancedCount)
		wanted   func(a *AdvancedCount)
	}{
		"spot set to empty if auto scaling is not empty": {
			original: func(a *AdvancedCount) {
				a.Spot = aws.Int(24)
			},
			override: func(a *AdvancedCount) {
				a.Range = Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				}
				a.CPU = mockConfig
				a.Requests = mockReq
			},
			wanted: func(a *AdvancedCount) {
				a.Range = Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				}
				a.CPU = mockConfig
				a.Requests = mockReq
			},
		},
		"auto scaling set to empty if spot is not nil": {
			original: func(a *AdvancedCount) {
				a.Range = Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				}
				a.CPU = mockConfig
				a.Requests = mockReq
			},
			override: func(a *AdvancedCount) {
				a.Spot = aws.Int(24)
			},
			wanted: func(a *AdvancedCount) {
				a.Spot = aws.Int(24)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted AdvancedCount
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(advancedCountTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestScalingConfigOrT_Transformer verifies that merging with
// scalingConfigOrTTransformer keeps only the scaling form (plain value vs.
// advanced config) that the override sets.
func TestScalingConfigOrT_Transformer(t *testing.T) {
	perc := Percentage(80)
	mockConfig := AdvancedScalingConfig[Percentage]{
		Value: &perc,
	}
	testCases := map[string]struct {
		original func(s *ScalingConfigOrT[Percentage])
		override func(s *ScalingConfigOrT[Percentage])
		wanted   func(s *ScalingConfigOrT[Percentage])
	}{
		"advanced config value set to nil if percentage is not nil": {
			original: func(s *ScalingConfigOrT[Percentage]) {
				s.ScalingConfig = mockConfig
			},
			override: func(s *ScalingConfigOrT[Percentage]) {
				s.Value = &perc
			},
			wanted: func(s *ScalingConfigOrT[Percentage]) {
				s.Value = &perc
			},
		},
		"percentage set to nil if advanced config value is not nil": {
			original: func(s *ScalingConfigOrT[Percentage]) {
				s.Value = &perc
			},
			override: func(s *ScalingConfigOrT[Percentage]) {
				s.ScalingConfig = mockConfig
			},
			wanted: func(s *ScalingConfigOrT[Percentage]) {
				s.ScalingConfig = mockConfig
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted ScalingConfigOrT[Percentage]
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(scalingConfigOrTTransformer[Percentage]{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestRangeTransformer_Transformer verifies that merging with rangeTransformer
// keeps only the range form (band string vs. range config) that the override
// sets.
func TestRangeTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(r *Range)
		override func(r *Range)
		wanted   func(r *Range)
	}{
		"value set to empty if range config is not nil": {
			original: func(r *Range) {
				r.Value = (*IntRangeBand)(aws.String("24-42"))
			},
			override: func(r *Range) {
				r.RangeConfig = RangeConfig{
					Min:      aws.Int(5),
					Max:      aws.Int(42),
					SpotFrom: aws.Int(13),
				}
			},
			wanted: func(r *Range) {
				r.RangeConfig = RangeConfig{
					Min:      aws.Int(5),
					Max:      aws.Int(42),
					SpotFrom: aws.Int(13),
				}
			},
		},
		"range config set to empty if value is not nil": {
			original: func(r *Range) {
				r.RangeConfig = RangeConfig{
					Min:      aws.Int(5),
					Max:      aws.Int(42),
					SpotFrom: aws.Int(13),
				}
			},
			override: func(r *Range) {
				r.Value = (*IntRangeBand)(aws.String("24-42"))
			},
			wanted: func(r *Range) {
				r.Value = (*IntRangeBand)(aws.String("24-42"))
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted Range
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(rangeTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestEfsConfigOrBoolTransformer_Transformer verifies that merging with
// efsConfigOrBoolTransformer keeps only the EFS form (enabled bool vs.
// advanced config) that the override sets.
func TestEfsConfigOrBoolTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(e *EFSConfigOrBool)
		override func(e *EFSConfigOrBool)
		wanted   func(e *EFSConfigOrBool)
	}{
		"bool set to empty if config is not nil": {
			original: func(e *EFSConfigOrBool) {
				e.Enabled = aws.Bool(true)
			},
			override: func(e *EFSConfigOrBool) {
				e.Advanced = EFSVolumeConfiguration{
					UID: aws.Uint32(31415926),
					GID: aws.Uint32(53589793),
				}
			},
			wanted: func(e *EFSConfigOrBool) {
				e.Advanced = EFSVolumeConfiguration{
					UID: aws.Uint32(31415926),
					GID: aws.Uint32(53589793),
				}
			},
		},
		"config set to empty if bool is not nil": {
			original: func(e *EFSConfigOrBool) {
				e.Advanced = EFSVolumeConfiguration{
					UID: aws.Uint32(31415926),
					GID: aws.Uint32(53589793),
				}
			},
			override: func(e *EFSConfigOrBool) {
				e.Enabled = aws.Bool(true)
			},
			wanted: func(e *EFSConfigOrBool) {
				e.Enabled = aws.Bool(true)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted EFSConfigOrBool
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(efsConfigOrBoolTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestEfsVolumeConfigurationTransformer_Transformer verifies that merging with
// efsVolumeConfigurationTransformer keeps managed (UID/GID) and BYO
// (filesystem ID) configuration mutually exclusive.
func TestEfsVolumeConfigurationTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(e *EFSVolumeConfiguration)
		override func(e *EFSVolumeConfiguration)
		wanted   func(e *EFSVolumeConfiguration)
	}{
		"UID config set to empty if BYO config is not empty": {
			original: func(e *EFSVolumeConfiguration) {
				e.UID = aws.Uint32(31415926)
				e.GID = aws.Uint32(53589793)
			},
			override: func(e *EFSVolumeConfiguration) {
				e.FileSystemID = aws.String("mockFileSystem")
				e.RootDirectory = aws.String("mockRootDir")
			},
			wanted: func(e *EFSVolumeConfiguration) {
				e.FileSystemID = aws.String("mockFileSystem")
				e.RootDirectory = aws.String("mockRootDir")
			},
		},
		"BYO config set to empty if UID config is not empty": {
			original: func(e *EFSVolumeConfiguration) {
				e.FileSystemID = aws.String("mockFileSystem")
				e.RootDirectory = aws.String("mockRootDir")
			},
			override: func(e *EFSVolumeConfiguration) {
				e.UID = aws.Uint32(31415926)
				e.GID = aws.Uint32(53589793)
			},
			wanted: func(e *EFSVolumeConfiguration) {
				e.UID = aws.Uint32(31415926)
				e.GID = aws.Uint32(53589793)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted EFSVolumeConfiguration
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(efsVolumeConfigurationTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestSQSQueueOrBoolTransformer_Transformer verifies that merging with
// sqsQueueOrBoolTransformer keeps only the queue form (enabled bool vs.
// advanced queue config) that the override sets.
func TestSQSQueueOrBoolTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(e *SQSQueueOrBool)
		override func(e *SQSQueueOrBool)
		wanted   func(e *SQSQueueOrBool)
	}{
		"bool set to empty if config is not nil": {
			original: func(e *SQSQueueOrBool) {
				e.Enabled = aws.Bool(true)
			},
			override: func(e *SQSQueueOrBool) {
				e.Advanced = SQSQueue{
					Retention: durationp(5 * time.Second),
				}
			},
			wanted: func(e *SQSQueueOrBool) {
				e.Advanced = SQSQueue{
					Retention: durationp(5 * time.Second),
				}
			},
		},
		"config set to empty if bool is not nil": {
			original: func(e *SQSQueueOrBool) {
				e.Advanced = SQSQueue{
					Retention: durationp(5 * time.Second),
				}
			},
			override: func(e *SQSQueueOrBool) {
				e.Enabled = aws.Bool(true)
			},
			wanted: func(e *SQSQueueOrBool) {
				e.Enabled = aws.Bool(true)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted SQSQueueOrBool
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(sqsQueueOrBoolTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestHTTPOrBoolTransformer_Transformer verifies that merging with
// httpOrBoolTransformer keeps only the HTTP form (enabled bool vs. full HTTP
// config) that the override sets.
func TestHTTPOrBoolTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(r *HTTPOrBool)
		override func(r *HTTPOrBool)
		wanted   func(r *HTTPOrBool)
	}{
		"bool set to empty if config is not nil": {
			original: func(r *HTTPOrBool) {
				r.Enabled = aws.Bool(true)
			},
			override: func(r *HTTPOrBool) {
				r.HTTP = HTTP{
					Main: RoutingRule{
						Path: aws.String("mockPath"),
					},
				}
			},
			wanted: func(r *HTTPOrBool) {
				r.HTTP = HTTP{
					Main: RoutingRule{
						Path: aws.String("mockPath"),
					},
				}
			},
		},
		"config set to empty if bool is not nil": {
			original: func(r *HTTPOrBool) {
				r.HTTP = HTTP{
					Main: RoutingRule{
						Path: aws.String("mockPath"),
					},
				}
			},
			override: func(r *HTTPOrBool) {
				r.Enabled = aws.Bool(false)
			},
			wanted: func(r *HTTPOrBool) {
				r.Enabled = aws.Bool(false)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted HTTPOrBool
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(httpOrBoolTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestSecretTransformer_Transformer verifies that merging with
// secretTransformer keeps the "from" and "secretsmanager" secret sources
// mutually exclusive: the override's source wins and the other is cleared.
func TestSecretTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(s *Secret)
		override func(s *Secret)
		wanted   func(s *Secret)
	}{
		`"from" set to empty when overriding with "secretsmanager"`: {
			original: func(s *Secret) {
				s.from = stringOrFromCFN{
					Plain: aws.String("/github/token"),
				}
			},
			override: func(s *Secret) {
				s.fromSecretsManager = secretsManagerSecret{
					Name: aws.String("aes128-1a2b3c"),
				}
			},
			wanted: func(s *Secret) {
				s.fromSecretsManager = secretsManagerSecret{
					Name: aws.String("aes128-1a2b3c"),
				}
			},
		},
		`"secretsmanager" set to empty when overriding with "from"`: {
			original: func(s *Secret) {
				s.fromSecretsManager = secretsManagerSecret{
					Name: aws.String("aes128-1a2b3c"),
				}
			},
			override: func(s *Secret) {
				s.from = stringOrFromCFN{
					Plain: aws.String("/github/token"),
				}
			},
			wanted: func(s *Secret) {
				s.from = stringOrFromCFN{
					Plain: aws.String("/github/token"),
				}
			},
		},
		`"secretsmanager" set to empty when overriding with imported "from"`: {
			original: func(s *Secret) {
				s.fromSecretsManager = secretsManagerSecret{
					Name: aws.String("aes128-1a2b3c"),
				}
			},
			override: func(s *Secret) {
				s.from = stringOrFromCFN{
					FromCFN: fromCFN{
						Name: aws.String("stack-SSMGHTokenName"),
					},
				}
			},
			wanted: func(s *Secret) {
				s.from = stringOrFromCFN{
					FromCFN: fromCFN{
						Name: aws.String("stack-SSMGHTokenName"),
					},
				}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted Secret
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(secretTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
// TestEnvironmentCDNConfigTransformer_Transformer verifies that merging with
// environmentCDNConfigTransformer keeps only the CDN form (enabled bool vs.
// advanced config) that the override sets.
func TestEnvironmentCDNConfigTransformer_Transformer(t *testing.T) {
	testCases := map[string]struct {
		original func(cfg *EnvironmentCDNConfig)
		override func(cfg *EnvironmentCDNConfig)
		wanted   func(cfg *EnvironmentCDNConfig)
	}{
		"cdnconfig set to empty if enabled is not nil": {
			original: func(cfg *EnvironmentCDNConfig) {
				cfg.Config = AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
				}
			},
			override: func(cfg *EnvironmentCDNConfig) {
				cfg.Enabled = aws.Bool(true)
			},
			wanted: func(cfg *EnvironmentCDNConfig) {
				cfg.Enabled = aws.Bool(true)
			},
		},
		"enabled set to nil if cdnconfig is not empty": {
			original: func(cfg *EnvironmentCDNConfig) {
				cfg.Enabled = aws.Bool(true)
			},
			override: func(cfg *EnvironmentCDNConfig) {
				cfg.Config = AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
				}
			},
			wanted: func(cfg *EnvironmentCDNConfig) {
				cfg.Config = AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
				}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var dst, override, wanted EnvironmentCDNConfig
			tc.original(&dst)
			tc.override(&override)
			tc.wanted(&wanted)
			// Perform default merge.
			err := mergo.Merge(&dst, override, mergo.WithOverride)
			require.NoError(t, err)
			// Use custom transformer.
			err = mergo.Merge(&dst, override, mergo.WithOverride, mergo.WithTransformers(environmentCDNConfigTransformer{}))
			require.NoError(t, err) // Removed redundant duplicate assertion on the same err.
			require.Equal(t, wanted, dst)
		})
	}
}
| 1,387 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"fmt"
"reflect"
"gopkg.in/yaml.v3"
)
// Union is a type used for yaml keys that may be of type Basic or Advanced.
// Union will only ever hold one of the underlying types, never both.
//
// Union is exported to enable type embedding.
type Union[Basic, Advanced any] struct {
	// isBasic is true if the underlying type of Union is Basic.
	isBasic bool
	// Basic holds the value of Union if IsBasic() is true.
	// If IsBasic() is false, this is the zero value of type Basic.
	//
	// Basic is exported to support mergo. It should not be set
	// directly. Use BasicToUnion() or SetBasic() to create a Union with Basic set.
	Basic Basic
	// isAdvanced is true if the underlying type of Union is Advanced.
	isAdvanced bool
	// Advanced holds the value of Union if IsAdvanced() is true.
	// If IsAdvanced() is false, this is the zero value of type Advanced.
	//
	// Advanced is exported to support mergo. It should not be set
	// directly. Use AdvancedToUnion() or SetAdvanced() to create a Union with Advanced set.
	Advanced Advanced
}
// BasicToUnion wraps val in a Union[Basic, Advanced] whose underlying
// type is Basic.
func BasicToUnion[Basic, Advanced any](val Basic) Union[Basic, Advanced] {
	var u Union[Basic, Advanced]
	u.isBasic = true
	u.Basic = val
	return u
}
// AdvancedToUnion wraps val in a Union[Basic, Advanced] whose underlying
// type is Advanced.
func AdvancedToUnion[Basic, Advanced any](val Advanced) Union[Basic, Advanced] {
	var u Union[Basic, Advanced]
	u.isAdvanced = true
	u.Advanced = val
	return u
}
// IsBasic returns true if the underlying value of t is type Basic.
// Both IsBasic and IsAdvanced return false when the Union was never set.
func (t Union[_, _]) IsBasic() bool {
	return t.isBasic
}
// IsAdvanced returns true if the underlying value of t is type Advanced.
// Both IsBasic and IsAdvanced return false when the Union was never set.
func (t Union[_, _]) IsAdvanced() bool {
	return t.isAdvanced
}
// UnmarshalYAML decodes value into either type Basic or Advanced, and stores that value
// in t. Value is first decoded into type Basic, and t will hold type Basic if
// (1) There was no error decoding value into type Basic and
// (2) Basic.IsZero() returns false OR Basic is not zero via reflection.
//
// If Basic didn't meet the above criteria, then value is decoded into type Advanced.
// t will hold type Advanced if Advanced meets the same conditions that were required for type Basic.
//
// An error is returned if value fails to decode into either type
// or both types are zero after decoding.
func (t *Union[Basic, Advanced]) UnmarshalYAML(value *yaml.Node) error {
	// Try the Basic form first; order matters, so Basic takes priority
	// when the value decodes non-zero into both forms.
	var basic Basic
	bErr := value.Decode(&basic)
	if bErr == nil && !isZero(basic) {
		t.SetBasic(basic)
		return nil
	}
	var advanced Advanced
	aErr := value.Decode(&advanced)
	if aErr == nil && !isZero(advanced) {
		t.SetAdvanced(advanced)
		return nil
	}
	// set an error to communicate why the Union is not
	// of each type
	switch {
	case bErr == nil && aErr == nil:
		// Both decoded cleanly but both were zero values.
		return fmt.Errorf("ambiguous value: neither the basic or advanced form for the field was set")
	case bErr == nil:
		bErr = fmt.Errorf("is zero")
	case aErr == nil:
		aErr = fmt.Errorf("is zero")
	}
	// multiline error because yaml.TypeError (which this likely is)
	// is already a multiline error
	return fmt.Errorf("unmarshal to basic form %T: %s\nunmarshal to advanced form %T: %s", t.Basic, bErr, t.Advanced, aErr)
}
// isZero reports whether v should be treated as a zero value.
// A yaml.IsZeroer decides for itself; any other value is inspected
// via reflection.
func isZero(v any) bool {
	zeroer, ok := v.(yaml.IsZeroer)
	if ok {
		return zeroer.IsZero()
	}
	return reflect.ValueOf(v).IsZero()
}
// MarshalYAML implements yaml.Marshaler by emitting whichever underlying
// value is currently held, or nil when neither form is set.
func (t Union[_, _]) MarshalYAML() (interface{}, error) {
	if t.IsBasic() {
		return t.Basic, nil
	}
	if t.IsAdvanced() {
		return t.Advanced, nil
	}
	return nil, nil
}
// IsZero returns true if the set value of t
// is determined to be zero via yaml.Zeroer
// or reflection. It also returns true if
// neither value for t is set.
func (t Union[_, _]) IsZero() bool {
	switch {
	case t.IsBasic():
		return isZero(t.Basic)
	case t.IsAdvanced():
		return isZero(t.Advanced)
	default:
		// Neither form is set: an unset Union is considered zero.
		return true
	}
}
// validate calls validate() on whichever member of t is set. Members
// without a validate() method, and an unset Union, are considered valid.
func (t Union[_, _]) validate() error {
	// Type declarations inside generic functions are not currently
	// supported, so the validator contract is an inline interface.
	var member any
	switch {
	case t.IsBasic():
		member = t.Basic
	case t.IsAdvanced():
		member = t.Advanced
	default:
		return nil
	}
	if v, ok := member.(interface{ validate() error }); ok {
		return v.validate()
	}
	return nil
}
// SetBasic makes v the held value of the Union, clearing any
// previously set Advanced value.
func (t *Union[Basic, Advanced]) SetBasic(v Basic) {
	var emptyAdvanced Advanced
	t.isAdvanced = false
	t.Advanced = emptyAdvanced
	t.isBasic = true
	t.Basic = v
}
// SetAdvanced makes v the held value of the Union, clearing any
// previously set Basic value.
func (t *Union[Basic, Advanced]) SetAdvanced(v Advanced) {
	var emptyBasic Basic
	t.isBasic = false
	t.Basic = emptyBasic
	t.isAdvanced = true
	t.Advanced = v
}
| 177 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// unionTest describes one YAML round-trip case for Union[A, B]: the input
// document, the expected decoded value (or the exact unmarshal error),
// and optionally the YAML expected after re-encoding when it differs
// from the input.
type unionTest[A, B any] struct {
	yaml                 string      // input YAML document with a top-level "key"
	expectedValue        Union[A, B] // value expected after decoding
	expectedUnmarshalErr string      // exact Decode error text; "" means decoding must succeed
	expectedYAML         string      // expected re-encoded YAML; defaults to the input when empty
}
// TestUnion exercises YAML decode/encode round-trips of Union across many
// basic/advanced type combinations, including ambiguous documents, zero
// values, and inputs that match neither form.
func TestUnion(t *testing.T) {
	runUnionTest(t, "string or []string, is string", unionTest[string, []string]{
		yaml:          `key: hello`,
		expectedValue: BasicToUnion[string, []string]("hello"),
	})
	// An empty string is zero for the basic form and invalid for the
	// advanced form, so decoding fails with both errors reported.
	runUnionTest(t, "string or []string, is zero string, error", unionTest[string, []string]{
		yaml:                 `key: ""`,
		expectedUnmarshalErr: "unmarshal to basic form string: is zero\nunmarshal to advanced form []string: yaml: unmarshal errors:\n  line 1: cannot unmarshal !!str `` into []string",
		expectedYAML:         `key: null`,
	})
	// A *string pointing at "" is non-zero, so the basic form is kept.
	runUnionTest(t, "*string or []string, is zero string", unionTest[*string, []string]{
		yaml:          `key: ""`,
		expectedValue: BasicToUnion[*string, []string](aws.String("")),
	})
	runUnionTest(t, "string or []string, is []string", unionTest[string, []string]{
		yaml: `
key:
  - asdf
  - jkl;`,
		expectedValue: AdvancedToUnion[string]([]string{"asdf", "jkl;"}),
	})
	runUnionTest(t, "bool or semiComplexStruct, is false bool", unionTest[bool, semiComplexStruct]{
		yaml:                 `key: false`,
		expectedUnmarshalErr: "unmarshal to basic form bool: is zero\nunmarshal to advanced form manifest.semiComplexStruct: yaml: unmarshal errors:\n  line 1: cannot unmarshal !!bool `false` into manifest.semiComplexStruct",
		expectedYAML:         `key: null`,
	})
	runUnionTest(t, "*bool or semiComplexStruct, is false bool", unionTest[*bool, semiComplexStruct]{
		yaml:          `key: false`,
		expectedValue: BasicToUnion[*bool, semiComplexStruct](aws.Bool(false)),
	})
	runUnionTest(t, "bool or semiComplexStruct, is true bool", unionTest[bool, semiComplexStruct]{
		yaml:          `key: true`,
		expectedValue: BasicToUnion[bool, semiComplexStruct](true),
	})
	runUnionTest(t, "bool or semiComplexStruct, is semiComplexStruct with all fields set", unionTest[bool, semiComplexStruct]{
		yaml: `
key:
  str: asdf
  bool: true
  int: 420
  str_ptr: jkl;
  bool_ptr: false
  int_ptr: 70`,
		expectedValue: AdvancedToUnion[bool](semiComplexStruct{
			Str:     "asdf",
			Bool:    true,
			Int:     420,
			StrPtr:  aws.String("jkl;"),
			BoolPtr: aws.Bool(false),
			IntPtr:  aws.Int(70),
		}),
	})
	runUnionTest(t, "bool or semiComplexStruct, is semiComplexStruct without strs set", unionTest[bool, semiComplexStruct]{
		yaml: `
key:
  bool: true
  int: 420
  bool_ptr: false
  int_ptr: 70`,
		expectedValue: AdvancedToUnion[bool](semiComplexStruct{
			Bool:    true,
			Int:     420,
			BoolPtr: aws.Bool(false),
			IntPtr:  aws.Int(70),
		}),
	})
	// Unknown keys decode the struct to its zero value, which is rejected.
	runUnionTest(t, "string or semiComplexStruct, is struct with invalid fields, error", unionTest[string, semiComplexStruct]{
		yaml: `
key:
  invalid_key: asdf`,
		expectedUnmarshalErr: `unmarshal to basic form string: yaml: unmarshal errors:
  line 3: cannot unmarshal !!map into string
unmarshal to advanced form manifest.semiComplexStruct: is zero`,
		expectedYAML: `key: null`,
	})
	runUnionTest(t, "complexStruct or semiComplexStruct, is complexStruct with all fields", unionTest[complexStruct, semiComplexStruct]{
		yaml: `
key:
  str_ptr: qwerty
  semi_complex_struct:
    str: asdf
    bool: true
    int: 420
    str_ptr: jkl;
    bool_ptr: false
    int_ptr: 70`,
		expectedValue: BasicToUnion[complexStruct, semiComplexStruct](complexStruct{
			StrPtr: aws.String("qwerty"),
			SemiComplexStruct: semiComplexStruct{
				Str:     "asdf",
				Bool:    true,
				Int:     420,
				StrPtr:  aws.String("jkl;"),
				BoolPtr: aws.Bool(false),
				IntPtr:  aws.Int(70),
			},
		}),
	})
	runUnionTest(t, "two structs, basic type doesn't support IsZero, correct yaml", unionTest[notIsZeroer, isZeroer]{
		yaml: `
key:
  subkey: hello`,
		expectedValue: BasicToUnion[notIsZeroer, isZeroer](notIsZeroer{"hello"}),
	})
	// Both forms decode to zero values, so the union is ambiguous.
	runUnionTest(t, "two structs, basic type doesn't support IsZero, incorrect yaml", unionTest[notIsZeroer, isZeroer]{
		yaml: `
key:
  randomkey: hello`,
		expectedUnmarshalErr: `ambiguous value: neither the basic or advanced form for the field was set`,
		expectedYAML:         `key: null`,
	})
	runUnionTest(t, "two structs, basic type supports IsZero, correct yaml", unionTest[isZeroer, notIsZeroer]{
		yaml: `
key:
  subkey: hello`,
		expectedValue: BasicToUnion[isZeroer, notIsZeroer](isZeroer{"hello"}),
	})
	runUnionTest(t, "two structs, basic type supports IsZero, incorrect yaml", unionTest[isZeroer, notIsZeroer]{
		yaml: `
key:
  randomkey: hello`,
		expectedUnmarshalErr: `ambiguous value: neither the basic or advanced form for the field was set`,
		expectedYAML:         `key: null`,
	})
	runUnionTest(t, "string or bool, is []string, error", unionTest[string, bool]{
		yaml: `
key:
  - asdf`,
		expectedUnmarshalErr: `unmarshal to basic form string: yaml: unmarshal errors:
  line 3: cannot unmarshal !!seq into string
unmarshal to advanced form bool: yaml: unmarshal errors:
  line 3: cannot unmarshal !!seq into bool`,
		expectedYAML: `key: null`,
	})
	runUnionTest(t, "bool or string, is []string, error", unionTest[bool, string]{
		yaml: `

key:
  - asdf`,
		expectedUnmarshalErr: `unmarshal to basic form bool: yaml: unmarshal errors:
  line 4: cannot unmarshal !!seq into bool
unmarshal to advanced form string: yaml: unmarshal errors:
  line 4: cannot unmarshal !!seq into string`,
		expectedYAML: `key: null`,
	})
	runUnionTest(t, "isZeroer or int, is random object, error", unionTest[isZeroer, int]{
		yaml: `key:
  randomkey: asdf`,
		expectedUnmarshalErr: `unmarshal to basic form manifest.isZeroer: is zero
unmarshal to advanced form int: yaml: unmarshal errors:
  line 2: cannot unmarshal !!map into int`,
		expectedYAML: `key: null`,
	})
	runUnionTest(t, "[]string or semiComplexStruct, is []string", unionTest[[]string, semiComplexStruct]{
		yaml: `
key:
  - asdf`,
		expectedValue: BasicToUnion[[]string, semiComplexStruct]([]string{"asdf"}),
	})
	runUnionTest(t, "[]string or semiComplexStruct, is semiComplexStruct", unionTest[[]string, semiComplexStruct]{
		yaml: `
key:
  bool: true
  int: 420`,
		expectedValue: AdvancedToUnion[[]string](semiComplexStruct{
			Bool: true,
			Int:  420,
		}),
	})
	runUnionTest(t, "[]string or semiComplexStruct, is string, error", unionTest[[]string, semiComplexStruct]{
		yaml:                 `key: asdf`,
		expectedUnmarshalErr: "unmarshal to basic form []string: yaml: unmarshal errors:\n  line 1: cannot unmarshal !!str `asdf` into []string\nunmarshal to advanced form manifest.semiComplexStruct: yaml: unmarshal errors:\n  line 1: cannot unmarshal !!str `asdf` into manifest.semiComplexStruct",
		expectedYAML:         `key: null`,
	})
	// When "key" never appears, the zero Union marshals back to null.
	runUnionTest(t, "string or semiComplexStruct, never instantiated", unionTest[string, semiComplexStruct]{
		yaml:          `wrongkey: asdf`,
		expectedValue: Union[string, semiComplexStruct]{},
		expectedYAML:  `key: null`,
	})
}
// keyValue is the document shape used by runUnionTest: a single "key"
// field holding the Union under test.
type keyValue[Basic, Advanced any] struct {
	Key Union[Basic, Advanced] `yaml:"key"`
}
// runUnionTest decodes test.yaml into a keyValue, verifies the decoded
// Union value (or the exact decode error), then re-encodes the document
// and verifies it matches test.expectedYAML, falling back to the input
// when no explicit expectation is given.
func runUnionTest[Basic, Advanced any](t *testing.T, name string, test unionTest[Basic, Advanced]) {
	t.Run(name, func(t *testing.T) {
		var doc keyValue[Basic, Advanced]
		err := yaml.NewDecoder(strings.NewReader(test.yaml)).Decode(&doc)
		if test.expectedUnmarshalErr == "" {
			require.NoError(t, err)
		} else {
			require.EqualError(t, err, test.expectedUnmarshalErr)
		}
		require.Equal(t, test.expectedValue, doc.Key)

		// Re-encode with a 2-space indent.
		var out bytes.Buffer
		enc := yaml.NewEncoder(&out)
		enc.SetIndent(2)
		require.NoError(t, enc.Encode(doc))
		require.NoError(t, enc.Close())

		want := test.expectedYAML
		if want == "" {
			want = test.yaml
		}
		// Verify the marshaled document matches the expectation.
		require.Equal(t, strings.TrimSpace(want), strings.TrimSpace(out.String()))
	})
}
// TestUnion_EmbeddedType verifies that a struct which embeds a Union
// still unmarshals both the advanced ([]string) and basic (string) forms.
func TestUnion_EmbeddedType(t *testing.T) {
	type embeddedType struct {
		Union[string, []string]
	}
	type keyValue struct {
		Key embeddedType `yaml:"key,omitempty"`
	}

	// test []string
	in := `
key:
  - asdf
`
	var kv keyValue
	require.NoError(t, yaml.Unmarshal([]byte(in), &kv))
	require.Equal(t, keyValue{
		Key: embeddedType{AdvancedToUnion[string]([]string{
			"asdf",
		})},
	}, kv)

	// test string
	in = `
key: qwerty
`
	kv = keyValue{}
	require.NoError(t, yaml.Unmarshal([]byte(in), &kv))
	require.Equal(t, keyValue{
		Key: embeddedType{BasicToUnion[string, []string]("qwerty")},
	}, kv)
}
// semiComplexStruct covers scalar fields and their pointer variants for
// exercising the advanced form of a Union.
type semiComplexStruct struct {
	Str     string  `yaml:"str,omitempty"`
	Bool    bool    `yaml:"bool"`
	Int     int     `yaml:"int"`
	StrPtr  *string `yaml:"str_ptr,omitempty"`
	BoolPtr *bool   `yaml:"bool_ptr,omitempty"`
	IntPtr  *int    `yaml:"int_ptr,omitempty"`
}

// complexStruct nests semiComplexStruct to exercise multi-level decoding.
type complexStruct struct {
	StrPtr            *string           `yaml:"str_ptr,omitempty"`
	SemiComplexStruct semiComplexStruct `yaml:"semi_complex_struct"`
}

// notIsZeroer deliberately lacks an IsZero method, forcing Union to fall
// back to reflection when checking whether a decode produced a value.
type notIsZeroer struct {
	SubKey string `yaml:"subkey"`
}

// isZeroer implements yaml.IsZeroer (see the IsZero method below).
type isZeroer struct {
	SubKey string `yaml:"subkey"`
}
// IsZero implements yaml.IsZeroer: the value is zero when SubKey is unset.
func (a isZeroer) IsZero() bool {
	return len(a.SubKey) == 0
}
| 299 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"net"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/graph"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/dustin/go-humanize/english"
)
const (
	// Container dependency status constants.
	dependsOnStart    = "START"
	dependsOnComplete = "COMPLETE"
	dependsOnSuccess  = "SUCCESS"
	dependsOnHealthy  = "HEALTHY"

	// Min and Max values for task ephemeral storage in GiB.
	ephemeralMinValueGiB = 20
	ephemeralMaxValueGiB = 200

	// File extension for environment files.
	envFileExt = ".env"
)

const (
	// TCP is the tcp protocol for NLB.
	TCP = "TCP"
	// TLS is the tls protocol for NLB.
	TLS = "TLS"
	udp = "UDP"

	// Tracing vendors.
	awsXRAY = "awsxray"
)

const (
	// Listener rules have a quota of five condition values per rule.
	// Please refer to https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html.
	maxConditionsPerRule = 5
	rootPath             = "/"
)

var (
	intRangeBandRegexp  = regexp.MustCompile(`^(\d+)-(\d+)$`)
	volumesPathRegexp   = regexp.MustCompile(`^[a-zA-Z0-9\-\.\_/]+$`)
	awsSNSTopicRegexp   = regexp.MustCompile(`^[a-zA-Z0-9_-]*$`)   // Validates that an expression contains only letters, numbers, underscores, and hyphens.
	awsNameRegexp       = regexp.MustCompile(`^[a-z][a-z0-9\-]+$`) // Validates that an expression starts with a letter and only contains letters, numbers, and hyphens.
	punctuationRegExp   = regexp.MustCompile(`[\.\-]{2,}`)         // Check for consecutive periods or dashes.
	trailingPunctRegExp = regexp.MustCompile(`[\-\.]$`)            // Check for trailing dash or dot.

	// Allowed values for container dependency statuses and related enums.
	essentialContainerDependsOnValidStatuses = []string{dependsOnStart, dependsOnHealthy}
	dependsOnValidStatuses                   = []string{dependsOnStart, dependsOnComplete, dependsOnSuccess, dependsOnHealthy}
	nlbValidProtocols                        = []string{TCP, TLS}
	validContainerProtocols                  = []string{TCP, udp}
	tracingValidVendors                      = []string{awsXRAY}
	ecsRollingUpdateStrategies               = []string{ECSDefaultRollingUpdateStrategy, ECSRecreateRollingUpdateStrategy}

	httpProtocolVersions = []string{"GRPC", "HTTP1", "HTTP2"}

	invalidTaskDefOverridePathRegexp  = []string{`Family`, `ContainerDefinitions\[\d+\].Name`}
	validSQSDeduplicationScopeValues  = []string{sqsDeduplicationScopeMessageGroup, sqsDeduplicationScopeQueue}
	validSQSFIFOThroughputLimitValues = []string{sqsFIFOThroughputLimitPerMessageGroupID, sqsFIFOThroughputLimitPerQueue}
)
// Validate returns nil if the DynamicWorkloadManifest is configured correctly,
// delegating to the validate() implementation of the wrapped manifest.
func (l *DynamicWorkloadManifest) Validate() error {
	return l.mft.validate()
}
// validate returns nil if LoadBalancedWebService is configured correctly.
// It validates the config and workload metadata first, then checks that
// every ALB routing rule and NLB listener targets a container that exposes
// a port, that container dependencies form a valid graph, and that no two
// containers expose the same port.
func (l LoadBalancedWebService) validate() error {
	var err error
	if err = l.LoadBalancedWebServiceConfig.validate(); err != nil {
		return err
	}
	if err = l.Workload.validate(); err != nil {
		return err
	}
	// Primary "http" routing rule target.
	if err = validateTargetContainer(validateTargetContainerOpts{
		mainContainerName: aws.StringValue(l.Name),
		mainContainerPort: l.ImageConfig.Port,
		targetContainer:   l.HTTPOrBool.Main.TargetContainer,
		sidecarConfig:     l.Sidecars,
	}); err != nil {
		return fmt.Errorf(`validate load balancer target for "http": %w`, err)
	}
	// Targets of any additional ALB routing rules.
	for idx, rule := range l.HTTPOrBool.AdditionalRoutingRules {
		if err = validateTargetContainer(validateTargetContainerOpts{
			mainContainerName: aws.StringValue(l.Name),
			mainContainerPort: l.ImageConfig.Port,
			targetContainer:   rule.TargetContainer,
			sidecarConfig:     l.Sidecars,
		}); err != nil {
			return fmt.Errorf(`validate load balancer target for "http.additional_rules[%d]": %w`, idx, err)
		}
	}
	// Primary NLB listener target.
	if err = validateTargetContainer(validateTargetContainerOpts{
		mainContainerName: aws.StringValue(l.Name),
		mainContainerPort: l.ImageConfig.Port,
		targetContainer:   l.NLBConfig.Listener.TargetContainer,
		sidecarConfig:     l.Sidecars,
	}); err != nil {
		return fmt.Errorf(`validate target for "nlb": %w`, err)
	}
	// Targets of any additional NLB listeners.
	for idx, listener := range l.NLBConfig.AdditionalListeners {
		if err = validateTargetContainer(validateTargetContainerOpts{
			mainContainerName: aws.StringValue(l.Name),
			mainContainerPort: l.ImageConfig.Port,
			targetContainer:   listener.TargetContainer,
			sidecarConfig:     l.Sidecars,
		}); err != nil {
			return fmt.Errorf(`validate target for "nlb.additional_listeners[%d]": %w`, idx, err)
		}
	}
	if err = validateContainerDeps(validateDependenciesOpts{
		sidecarConfig:     l.Sidecars,
		imageConfig:       l.ImageConfig.Image,
		mainContainerName: aws.StringValue(l.Name),
		logging:           l.Logging,
	}); err != nil {
		return fmt.Errorf("validate container dependencies: %w", err)
	}
	if err = validateExposedPorts(validateExposedPortsOpts{
		mainContainerName: aws.StringValue(l.Name),
		mainContainerPort: l.ImageConfig.Port,
		sidecarConfig:     l.Sidecars,
		alb:               &l.HTTPOrBool.HTTP,
		nlb:               &l.NLBConfig,
	}); err != nil {
		return fmt.Errorf("validate unique exposed ports: %w", err)
	}
	return nil
}
// validate returns nil if DeploymentConfig is configured correctly.
func (d DeploymentConfig) validate() error {
	if d.isEmpty() {
		return nil
	}
	// Validate each sub-field, wrapping failures with the field name.
	for _, check := range []struct {
		field    string
		validate func() error
	}{
		{"rollback_alarms", d.RollbackAlarms.validate},
		{"rolling", d.DeploymentControllerConfig.validate},
	} {
		if err := check.validate(); err != nil {
			return fmt.Errorf(`validate %q: %w`, check.field, err)
		}
	}
	return nil
}
// validate returns nil if WorkerDeploymentConfig is configured correctly.
func (w WorkerDeploymentConfig) validate() error {
	if w.isEmpty() {
		return nil
	}
	// Validate each sub-field, wrapping failures with the field name.
	for _, check := range []struct {
		field    string
		validate func() error
	}{
		{"rollback_alarms", w.WorkerRollbackAlarms.validate},
		{"deployment controller strategy", w.DeploymentControllerConfig.validate},
	} {
		if err := check.validate(); err != nil {
			return fmt.Errorf(`validate %q: %w`, check.field, err)
		}
	}
	return nil
}
// validate returns nil if DeploymentControllerConfig is configured
// correctly; the rolling strategy, when set, must match one of the
// supported ECS strategies (case-insensitively).
func (d DeploymentControllerConfig) validate() error {
	if d.Rolling == nil {
		return nil
	}
	strategy := aws.StringValue(d.Rolling)
	for _, valid := range ecsRollingUpdateStrategies {
		if strings.EqualFold(strategy, valid) {
			return nil
		}
	}
	return fmt.Errorf("invalid rolling deployment strategy %q, must be one of %s",
		strategy,
		english.WordSeries(ecsRollingUpdateStrategies, "or"))
}
// validate is a no-op: AlarmArgs currently has no constraints to enforce.
func (a AlarmArgs) validate() error {
	return nil
}

// validate is a no-op: WorkerAlarmArgs currently has no constraints to enforce.
func (w WorkerAlarmArgs) validate() error {
	return nil
}
// validate returns nil if LoadBalancedWebServiceConfig is configured correctly.
// A load balanced web service must expose at least one of "http" or "nlb";
// each sub-field is then validated in turn, followed by platform-specific
// checks for Windows and ARM tasks.
func (l LoadBalancedWebServiceConfig) validate() error {
	var err error
	// At least one load balancer must be enabled.
	if l.HTTPOrBool.Disabled() && l.NLBConfig.IsEmpty() {
		return &errAtLeastOneFieldMustBeSpecified{
			missingFields: []string{"http", "nlb"},
		}
	}
	if err = l.validateGracePeriod(); err != nil {
		return fmt.Errorf(`validate "grace_period": %w`, err)
	}
	// Request/response-time autoscaling relies on ALB metrics, which don't
	// exist when only an NLB is configured.
	if l.HTTPOrBool.Disabled() && (!l.Count.AdvancedCount.Requests.IsEmpty() || !l.Count.AdvancedCount.ResponseTime.IsEmpty()) {
		return errors.New(`scaling based on "nlb" requests or response time is not supported`)
	}
	if err = l.ImageConfig.validate(); err != nil {
		return fmt.Errorf(`validate "image": %w`, err)
	}
	if err = l.ImageOverride.validate(); err != nil {
		return err
	}
	if err = l.HTTPOrBool.validate(); err != nil {
		return fmt.Errorf(`validate "http": %w`, err)
	}
	if err = l.TaskConfig.validate(); err != nil {
		return err
	}
	if err = l.Logging.validate(); err != nil {
		return fmt.Errorf(`validate "logging": %w`, err)
	}
	for k, v := range l.Sidecars {
		if err = v.validate(); err != nil {
			return fmt.Errorf(`validate "sidecars[%s]": %w`, k, err)
		}
	}
	if err = l.Network.validate(); err != nil {
		return fmt.Errorf(`validate "network": %w`, err)
	}
	if err = l.PublishConfig.validate(); err != nil {
		return fmt.Errorf(`validate "publish": %w`, err)
	}
	for ind, taskDefOverride := range l.TaskDefOverrides {
		if err = taskDefOverride.validate(); err != nil {
			return fmt.Errorf(`validate "taskdef_overrides[%d]": %w`, ind, err)
		}
	}
	// Platform-specific restrictions.
	if l.TaskConfig.IsWindows() {
		if err = validateWindows(validateWindowsOpts{
			efsVolumes: l.Storage.Volumes,
			readOnlyFS: l.Storage.ReadonlyRootFS,
		}); err != nil {
			return fmt.Errorf("validate Windows: %w", err)
		}
	}
	if l.TaskConfig.IsARM() {
		if err = validateARM(validateARMOpts{
			Spot:     l.Count.AdvancedCount.Spot,
			SpotFrom: l.Count.AdvancedCount.Range.RangeConfig.SpotFrom,
		}); err != nil {
			return fmt.Errorf("validate ARM: %w", err)
		}
	}
	if err = l.NLBConfig.validate(); err != nil {
		return fmt.Errorf(`validate "nlb": %w`, err)
	}
	if err = l.DeployConfig.validate(); err != nil {
		return fmt.Errorf(`validate "deployment": %w`, err)
	}
	return nil
}
// validate returns nil if BackendService is configured correctly.
// It validates the deployment and service config, the workload metadata,
// every HTTP routing rule's target container, container dependencies,
// and port uniqueness across containers.
func (b BackendService) validate() error {
	var err error
	if err = b.DeployConfig.validate(); err != nil {
		return fmt.Errorf(`validate "deployment": %w`, err)
	}
	if err = b.BackendServiceConfig.validate(); err != nil {
		return err
	}
	if err = b.Workload.validate(); err != nil {
		return err
	}
	// Primary "http" routing rule target.
	if err = validateTargetContainer(validateTargetContainerOpts{
		mainContainerName: aws.StringValue(b.Name),
		mainContainerPort: b.ImageConfig.Port,
		targetContainer:   b.HTTP.Main.TargetContainer,
		sidecarConfig:     b.Sidecars,
	}); err != nil {
		return fmt.Errorf(`validate load balancer target for "http": %w`, err)
	}
	// Targets of any additional routing rules.
	for idx, rule := range b.HTTP.AdditionalRoutingRules {
		if err = validateTargetContainer(validateTargetContainerOpts{
			mainContainerName: aws.StringValue(b.Name),
			mainContainerPort: b.ImageConfig.Port,
			targetContainer:   rule.TargetContainer,
			sidecarConfig:     b.Sidecars,
		}); err != nil {
			return fmt.Errorf(`validate load balancer target for "http.additional_rules[%d]": %w`, idx, err)
		}
	}
	if err = validateContainerDeps(validateDependenciesOpts{
		sidecarConfig:     b.Sidecars,
		imageConfig:       b.ImageConfig.Image,
		mainContainerName: aws.StringValue(b.Name),
		logging:           b.Logging,
	}); err != nil {
		return fmt.Errorf("validate container dependencies: %w", err)
	}
	if err = validateExposedPorts(validateExposedPortsOpts{
		mainContainerName: aws.StringValue(b.Name),
		mainContainerPort: b.ImageConfig.Port,
		sidecarConfig:     b.Sidecars,
		alb:               &b.HTTP,
	}); err != nil {
		return fmt.Errorf("validate unique exposed ports: %w", err)
	}
	return nil
}
// validate returns nil if BackendServiceConfig is configured correctly.
// Each sub-field is validated in turn; request-based autoscaling requires
// "http", a Service Connect alias requires an exposed port, and Windows
// and ARM tasks have extra platform restrictions.
func (b BackendServiceConfig) validate() error {
	var err error
	if err = b.ImageConfig.validate(); err != nil {
		return fmt.Errorf(`validate "image": %w`, err)
	}
	if err = b.ImageOverride.validate(); err != nil {
		return err
	}
	if err = b.HTTP.validate(); err != nil {
		return fmt.Errorf(`validate "http": %w`, err)
	}
	// Request/response-time autoscaling depends on ALB metrics.
	if b.HTTP.IsEmpty() && (!b.Count.AdvancedCount.Requests.IsEmpty() || !b.Count.AdvancedCount.ResponseTime.IsEmpty()) {
		return &errFieldMustBeSpecified{
			missingField:      "http",
			conditionalFields: []string{"count.requests", "count.response_time"},
		}
	}
	if err = b.TaskConfig.validate(); err != nil {
		return err
	}
	if err = b.Logging.validate(); err != nil {
		return fmt.Errorf(`validate "logging": %w`, err)
	}
	for k, v := range b.Sidecars {
		if err = v.validate(); err != nil {
			return fmt.Errorf(`validate "sidecars[%s]": %w`, k, err)
		}
	}
	if err = b.Network.validate(); err != nil {
		return fmt.Errorf(`validate "network": %w`, err)
	}
	// A Service Connect alias is only meaningful when some port is exposed.
	if b.Network.Connect.Alias != nil {
		if b.HTTP.Main.TargetContainer == nil && b.ImageConfig.Port == nil {
			return fmt.Errorf(`cannot set "network.connect.alias" when no ports are exposed`)
		}
	}
	if err = b.PublishConfig.validate(); err != nil {
		return fmt.Errorf(`validate "publish": %w`, err)
	}
	for ind, taskDefOverride := range b.TaskDefOverrides {
		if err = taskDefOverride.validate(); err != nil {
			return fmt.Errorf(`validate "taskdef_overrides[%d]": %w`, ind, err)
		}
	}
	// Platform-specific restrictions.
	if b.TaskConfig.IsWindows() {
		if err = validateWindows(validateWindowsOpts{
			efsVolumes: b.Storage.Volumes,
			readOnlyFS: b.Storage.ReadonlyRootFS,
		}); err != nil {
			return fmt.Errorf("validate Windows: %w", err)
		}
	}
	if b.TaskConfig.IsARM() {
		if err = validateARM(validateARMOpts{
			Spot:     b.Count.AdvancedCount.Spot,
			SpotFrom: b.Count.AdvancedCount.Range.RangeConfig.SpotFrom,
		}); err != nil {
			return fmt.Errorf("validate ARM: %w", err)
		}
	}
	return nil
}
// validate returns nil if RequestDrivenWebService is configured correctly.
// The service config is checked first, then the workload metadata.
func (r RequestDrivenWebService) validate() error {
	for _, validate := range []func() error{
		r.RequestDrivenWebServiceConfig.validate,
		r.Workload.validate,
	} {
		if err := validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate returns nil if RequestDrivenWebServiceConfig is configured correctly.
// Each sub-field is validated in turn; App Runner services only support
// private subnet placement.
func (r RequestDrivenWebServiceConfig) validate() error {
	var err error
	if err = r.ImageConfig.validate(); err != nil {
		return fmt.Errorf(`validate "image": %w`, err)
	}
	if err = r.InstanceConfig.validate(); err != nil {
		return err
	}
	if err = r.RequestDrivenWebServiceHttpConfig.validate(); err != nil {
		return fmt.Errorf(`validate "http": %w`, err)
	}
	if err = r.PublishConfig.validate(); err != nil {
		return fmt.Errorf(`validate "publish": %w`, err)
	}
	if err = r.Network.validate(); err != nil {
		return fmt.Errorf(`validate "network": %w`, err)
	}
	// Request-driven web services can only be placed in private subnets.
	if r.Network.VPC.Placement.PlacementString != nil &&
		*r.Network.VPC.Placement.PlacementString != PrivateSubnetPlacement {
		return fmt.Errorf(`placement %q is not supported for %s`,
			*r.Network.VPC.Placement.PlacementString, manifestinfo.RequestDrivenWebServiceType)
	}
	if err = r.Observability.validate(); err != nil {
		return fmt.Errorf(`validate "observability": %w`, err)
	}
	return nil
}
// validate returns nil if WorkerService is configured correctly.
// It validates the service config, the workload metadata, container
// dependencies, and port uniqueness across sidecars.
func (w WorkerService) validate() error {
	var err error
	if err = w.WorkerServiceConfig.validate(); err != nil {
		return err
	}
	if err = w.Workload.validate(); err != nil {
		return err
	}
	if err = validateContainerDeps(validateDependenciesOpts{
		sidecarConfig:     w.Sidecars,
		imageConfig:       w.ImageConfig.Image,
		mainContainerName: aws.StringValue(w.Name),
		logging:           w.Logging,
	}); err != nil {
		return fmt.Errorf("validate container dependencies: %w", err)
	}
	// Worker services expose no load balancer; only sidecar ports can clash.
	if err = validateExposedPorts(validateExposedPortsOpts{
		sidecarConfig: w.Sidecars,
	}); err != nil {
		return fmt.Errorf("validate unique exposed ports: %w", err)
	}
	return nil
}
// validate returns nil if WorkerServiceConfig is configured correctly.
// Each sub-field is validated in turn; a Service Connect alias is rejected
// outright because worker services never expose ports, and Windows and ARM
// tasks have extra platform restrictions.
func (w WorkerServiceConfig) validate() error {
	var err error
	if err = w.DeployConfig.validate(); err != nil {
		return fmt.Errorf(`validate "deployment": %w`, err)
	}
	if err = w.ImageConfig.validate(); err != nil {
		return fmt.Errorf(`validate "image": %w`, err)
	}
	if err = w.ImageOverride.validate(); err != nil {
		return err
	}
	if err = w.TaskConfig.validate(); err != nil {
		return err
	}
	if err = w.Logging.validate(); err != nil {
		return fmt.Errorf(`validate "logging": %w`, err)
	}
	for k, v := range w.Sidecars {
		if err = v.validate(); err != nil {
			return fmt.Errorf(`validate "sidecars[%s]": %w`, k, err)
		}
	}
	if err = w.Network.validate(); err != nil {
		return fmt.Errorf(`validate "network": %w`, err)
	}
	// Worker services expose no ports, so an alias can never be reached.
	if w.Network.Connect.Alias != nil {
		return fmt.Errorf(`cannot set "network.connect.alias" when no ports are exposed`)
	}
	if err = w.Subscribe.validate(); err != nil {
		return fmt.Errorf(`validate "subscribe": %w`, err)
	}
	if err = w.PublishConfig.validate(); err != nil {
		return fmt.Errorf(`validate "publish": %w`, err)
	}
	for ind, taskDefOverride := range w.TaskDefOverrides {
		if err = taskDefOverride.validate(); err != nil {
			return fmt.Errorf(`validate "taskdef_overrides[%d]": %w`, ind, err)
		}
	}
	// Platform-specific restrictions.
	if w.TaskConfig.IsWindows() {
		if err = validateWindows(validateWindowsOpts{
			efsVolumes: w.Storage.Volumes,
			readOnlyFS: w.Storage.ReadonlyRootFS,
		}); err != nil {
			return fmt.Errorf(`validate Windows: %w`, err)
		}
	}
	if w.TaskConfig.IsARM() {
		if err = validateARM(validateARMOpts{
			Spot:     w.Count.AdvancedCount.Spot,
			SpotFrom: w.Count.AdvancedCount.Range.RangeConfig.SpotFrom,
		}); err != nil {
			return fmt.Errorf("validate ARM: %w", err)
		}
	}
	return nil
}
// validate returns nil if ScheduledJob is configured correctly.
// It validates the job config, the workload metadata, container
// dependencies, and port uniqueness across sidecars.
func (s ScheduledJob) validate() error {
	var err error
	if err = s.ScheduledJobConfig.validate(); err != nil {
		return err
	}
	if err = s.Workload.validate(); err != nil {
		return err
	}
	if err = validateContainerDeps(validateDependenciesOpts{
		sidecarConfig:     s.Sidecars,
		imageConfig:       s.ImageConfig.Image,
		mainContainerName: aws.StringValue(s.Name),
		logging:           s.Logging,
	}); err != nil {
		return fmt.Errorf("validate container dependencies: %w", err)
	}
	// Jobs expose no load balancer; only sidecar ports can clash.
	if err = validateExposedPorts(validateExposedPortsOpts{
		sidecarConfig: s.Sidecars,
	}); err != nil {
		return fmt.Errorf("validate unique exposed ports: %w", err)
	}
	return nil
}
// validate returns nil if ScheduledJobConfig is configured correctly.
// Each sub-field is validated in turn, including the schedule ("on") and
// failure-handling config; Windows and ARM tasks have extra platform
// restrictions.
func (s ScheduledJobConfig) validate() error {
	var err error
	if err = s.ImageConfig.validate(); err != nil {
		return fmt.Errorf(`validate "image": %w`, err)
	}
	if err = s.ImageOverride.validate(); err != nil {
		return err
	}
	if err = s.TaskConfig.validate(); err != nil {
		return err
	}
	if err = s.Logging.validate(); err != nil {
		return fmt.Errorf(`validate "logging": %w`, err)
	}
	for k, v := range s.Sidecars {
		if err = v.validate(); err != nil {
			return fmt.Errorf(`validate "sidecars[%s]": %w`, k, err)
		}
	}
	if err = s.Network.validate(); err != nil {
		return fmt.Errorf(`validate "network": %w`, err)
	}
	if err = s.On.validate(); err != nil {
		return fmt.Errorf(`validate "on": %w`, err)
	}
	if err = s.JobFailureHandlerConfig.validate(); err != nil {
		return err
	}
	if err = s.PublishConfig.validate(); err != nil {
		return fmt.Errorf(`validate "publish": %w`, err)
	}
	for ind, taskDefOverride := range s.TaskDefOverrides {
		if err = taskDefOverride.validate(); err != nil {
			return fmt.Errorf(`validate "taskdef_overrides[%d]": %w`, ind, err)
		}
	}
	// Platform-specific restrictions.
	if s.TaskConfig.IsWindows() {
		if err = validateWindows(validateWindowsOpts{
			efsVolumes: s.Storage.Volumes,
			readOnlyFS: s.Storage.ReadonlyRootFS,
		}); err != nil {
			return fmt.Errorf(`validate Windows: %w`, err)
		}
	}
	if s.TaskConfig.IsARM() {
		if err = validateARM(validateARMOpts{
			Spot:     s.Count.AdvancedCount.Spot,
			SpotFrom: s.Count.AdvancedCount.Range.RangeConfig.SpotFrom,
		}); err != nil {
			return fmt.Errorf("validate ARM: %w", err)
		}
	}
	return nil
}
// validate returns nil if StaticSite is configured correctly.
// The site config is checked first, then the workload metadata.
func (s StaticSite) validate() error {
	if err := s.StaticSiteConfig.validate(); err != nil {
		return err
	}
	if err := s.Workload.validate(); err != nil {
		return err
	}
	return nil
}
// validate returns nil if every file upload in StaticSiteConfig is
// configured correctly.
func (s StaticSiteConfig) validate() error {
	for idx := range s.FileUploads {
		if err := s.FileUploads[idx].validate(); err != nil {
			return fmt.Errorf(`validate "files[%d]": %w`, idx, err)
		}
	}
	return nil
}
// validate returns nil if FileUpload is configured correctly.
func (f FileUpload) validate() error {
	return f.validateSource()
}

// validateSource returns nil if Source is configured correctly;
// "source" is a required field.
func (f FileUpload) validateSource() error {
	if len(f.Source) > 0 {
		return nil
	}
	return &errFieldMustBeSpecified{
		missingField: "source",
	}
}
// Validate returns nil if the pipeline manifest is configured correctly:
// the pipeline name must fit the 100-character limit, and every stage's
// deployment dependencies must reference deployments that exist.
func (p Pipeline) Validate() error {
	// AWS CodePipeline permits pipeline names of up to 100 characters.
	// The previous message claimed "shorter than 100" although names of
	// exactly 100 characters are accepted by this check.
	if len(p.Name) > 100 {
		return fmt.Errorf(`pipeline name '%s' must be 100 characters or fewer`, p.Name)
	}
	for _, stg := range p.Stages {
		if err := stg.Deployments.validate(); err != nil {
			return fmt.Errorf(`validate "deployments" for pipeline stage %s: %w`, stg.Name, err)
		}
	}
	return nil
}
// validate returns nil if deployments are configured correctly: every
// entry listed in a deployment's depends_on must itself be a deployment
// declared in the same stage.
func (d Deployments) validate() error {
	// Dependencies can only reference sibling deployments, so existence is
	// checked directly against the map's own keys — no auxiliary set needed.
	for name, conf := range d {
		if conf == nil {
			continue
		}
		for _, dependency := range conf.DependsOn {
			if _, ok := d[dependency]; !ok {
				return fmt.Errorf("dependency deployment named '%s' of '%s' does not exist", dependency, name)
			}
		}
	}
	return nil
}
// validate returns nil if Workload is configured correctly;
// "name" is a required field.
func (w Workload) validate() error {
	if w.Name != nil {
		return nil
	}
	return &errFieldMustBeSpecified{
		missingField: "name",
	}
}
// validate returns nil if ImageWithPortAndHealthcheck is configured correctly.
func (i ImageWithPortAndHealthcheck) validate() error {
	if err := i.ImageWithPort.validate(); err != nil {
		return err
	}
	if err := i.HealthCheck.validate(); err != nil {
		return fmt.Errorf(`validate "healthcheck": %w`, err)
	}
	return nil
}

// validate returns nil if ImageWithHealthcheckAndOptionalPort is configured correctly.
func (i ImageWithHealthcheckAndOptionalPort) validate() error {
	if err := i.ImageWithOptionalPort.validate(); err != nil {
		return err
	}
	if err := i.HealthCheck.validate(); err != nil {
		return fmt.Errorf(`validate "healthcheck": %w`, err)
	}
	return nil
}
// validate returns nil if ImageWithHealthcheck is configured correctly.
// It delegates entirely to the underlying image configuration.
func (i ImageWithHealthcheck) validate() error {
	return i.Image.validate()
}

// validate returns nil if ImageWithOptionalPort is configured correctly.
// It delegates entirely to the underlying image configuration.
func (i ImageWithOptionalPort) validate() error {
	return i.Image.validate()
}
// validate returns nil if ImageWithPort is configured correctly;
// "port" is a required field.
func (i ImageWithPort) validate() error {
	if err := i.Image.validate(); err != nil {
		return err
	}
	if i.Port != nil {
		return nil
	}
	return &errFieldMustBeSpecified{
		missingField: "port",
	}
}
// validate returns nil if Image is configured correctly: exactly one of
// "build" and "location" must be set, and any container dependencies
// must use valid statuses.
func (i Image) validate() error {
	if err := i.ImageLocationOrBuild.validate(); err != nil {
		return err
	}
	// "build" and "location" are mutually exclusive, but one is required.
	if i.Build.isEmpty() == (i.Location == nil) {
		return &errFieldMutualExclusive{
			firstField:  "build",
			secondField: "location",
			mustExist:   true,
		}
	}
	if err := i.DependsOn.validate(); err != nil {
		return fmt.Errorf(`validate "depends_on": %w`, err)
	}
	return nil
}
// validate returns nil if DependsOn is configured correctly: every
// dependency's status (case-insensitive) must be one of the valid
// container dependency statuses.
func (d DependsOn) validate() error {
	if d == nil {
		return nil
	}
	for _, v := range d {
		status := strings.ToUpper(v)
		var isValid bool
		for _, allowed := range dependsOnValidStatuses {
			if status == allowed {
				isValid = true
				break
			}
		}
		if !isValid {
			// Reuse the canonical status list so the message can never
			// drift from the values actually accepted above.
			return fmt.Errorf("container dependency status must be one of %s", english.WordSeries(dependsOnValidStatuses, "or"))
		}
	}
	return nil
}
// validate returns nil if BuildArgsOrString is configured correctly.
// Only a non-empty advanced BuildArgs form needs further validation.
func (b BuildArgsOrString) validate() error {
	if b.isEmpty() || b.BuildArgs.isEmpty() {
		return nil
	}
	return b.BuildArgs.validate()
}
// validate returns nil if DockerBuildArgs is configured correctly.
// Currently a no-op: there are no constraints to enforce.
func (DockerBuildArgs) validate() error {
	return nil
}

// validate returns nil if ContainerHealthCheck is configured correctly.
// Currently a no-op: there are no constraints to enforce.
func (ContainerHealthCheck) validate() error {
	return nil
}
// validate returns nil if ImageOverride is configured correctly,
// checking the entrypoint and command overrides in turn.
func (i ImageOverride) validate() error {
	if err := i.EntryPoint.validate(); err != nil {
		return fmt.Errorf(`validate "entrypoint": %w`, err)
	}
	if err := i.Command.validate(); err != nil {
		return fmt.Errorf(`validate "command": %w`, err)
	}
	return nil
}
// validate returns nil if EntryPointOverride is configured correctly.
// Currently a no-op: there are no constraints to enforce.
func (EntryPointOverride) validate() error {
	return nil
}

// validate returns nil if CommandOverride is configured correctly.
// Currently a no-op: there are no constraints to enforce.
func (CommandOverride) validate() error {
	return nil
}
// validate returns nil if HTTP is configured correctly.
// The primary routing rule is mandatory and validated first; the
// deprecated camelCase "targetContainer" cannot be combined with
// "target_container"; finally each additional rule is validated.
func (r HTTP) validate() error {
	if r.IsEmpty() {
		return nil
	}
	// we consider the fact that primary routing rule is mandatory before you write any additional routing rules.
	if err := r.Main.validate(); err != nil {
		return err
	}
	if r.Main.TargetContainer != nil && r.TargetContainerCamelCase != nil {
		return &errFieldMutualExclusive{
			firstField:  "target_container",
			secondField: "targetContainer",
		}
	}
	for idx, rule := range r.AdditionalRoutingRules {
		if err := rule.validate(); err != nil {
			return fmt.Errorf(`validate "additional_rules[%d]": %w`, idx, err)
		}
	}
	return nil
}
// validate returns nil if HTTPOrBool is configured correctly.
// A disabled HTTP configuration is always valid.
func (r HTTPOrBool) validate() error {
	if !r.Disabled() {
		return r.HTTP.validate()
	}
	return nil
}
// validateGracePeriod ensures a health-check grace period is set on at most one
// of the ALB and the NLB, since only a single grace period can take effect.
func (l LoadBalancedWebServiceConfig) validateGracePeriod() error {
	gracePeriodForALB, err := l.validateGracePeriodForALB()
	if err != nil {
		return err
	}
	gracePeriodForNLB, err := l.validateGracePeriodForNLB()
	if err != nil {
		return err
	}
	if gracePeriodForALB && gracePeriodForNLB {
		return &errGracePeriodsInBothALBAndNLB{
			errFieldMutualExclusive: errFieldMutualExclusive{
				firstField:  "http.healthcheck.grace_period",
				secondField: "nlb.healthcheck.grace_period",
			},
		}
	}
	return nil
}

// validateGracePeriodForALB reports whether the main ALB routing rule sets a
// grace period, and errors if any additional routing rule sets one (only the
// main rule may carry it).
func (cfg *LoadBalancedWebServiceConfig) validateGracePeriodForALB() (bool, error) {
	var exist bool
	if cfg.HTTPOrBool.Main.HealthCheck.Advanced.GracePeriod != nil {
		exist = true
	}
	for idx, rule := range cfg.HTTPOrBool.AdditionalRoutingRules {
		if rule.HealthCheck.Advanced.GracePeriod != nil {
			return exist, &errGracePeriodSpecifiedInAdditionalRule{
				index: idx,
			}
		}
	}
	return exist, nil
}

// validateGracePeriodForNLB reports whether the main NLB listener sets a grace
// period, and errors if any additional listener sets one (only the main
// listener may carry it).
func (cfg *LoadBalancedWebServiceConfig) validateGracePeriodForNLB() (bool, error) {
	var exist bool
	if cfg.NLBConfig.Listener.HealthCheck.GracePeriod != nil {
		exist = true
	}
	for idx, listener := range cfg.NLBConfig.AdditionalListeners {
		if listener.HealthCheck.GracePeriod != nil {
			return exist, &errGracePeriodSpecifiedInAdditionalListener{
				index: idx,
			}
		}
	}
	return exist, nil
}
// validate returns nil if RoutingRule is configured correctly: "path" is
// required, the health check, aliases, and source IPs must be valid, the
// protocol version (if any) must be a supported ALB protocol, and a hosted
// zone may only be set together with an alias.
func (r RoutingRule) validate() error {
	if r.Path == nil {
		return &errFieldMustBeSpecified{
			missingField: "path",
		}
	}
	if err := r.HealthCheck.validate(); err != nil {
		return fmt.Errorf(`validate "healthcheck": %w`, err)
	}
	if err := r.Alias.validate(); err != nil {
		return fmt.Errorf(`validate "alias": %w`, err)
	}
	for ind, ip := range r.AllowedSourceIps {
		if err := ip.validate(); err != nil {
			return fmt.Errorf(`validate "allowed_source_ips[%d]": %w`, ind, err)
		}
	}
	// Protocol versions are compared case-insensitively by upper-casing the user value.
	if r.ProtocolVersion != nil {
		if !contains(strings.ToUpper(*r.ProtocolVersion), httpProtocolVersions) {
			return fmt.Errorf(`"version" field value '%s' must be one of %s`, *r.ProtocolVersion, english.WordSeries(httpProtocolVersions, "or"))
		}
	}
	// A custom hosted zone is only meaningful when there is an alias to create a record for.
	if r.HostedZone != nil && r.Alias.IsEmpty() {
		return &errFieldMustBeSpecified{
			missingField:      "alias",
			conditionalFields: []string{"hosted_zone"},
		}
	}
	if err := r.validateConditionValuesPerRule(); err != nil {
		return fmt.Errorf("validate condition values per listener rule: %w", err)
	}
	return nil
}
// validate returns nil if HTTPHealthCheckArgs is configured correctly.
// All fields are currently optional, so there is nothing to check.
func (h HTTPHealthCheckArgs) validate() error {
	return nil
}

// validate returns nil if NLBHealthCheckArgs is configured correctly.
// NOTE(review): both branches return nil today; the isEmpty check appears to be
// a placeholder for future field-level validation — confirm before removing.
func (h NLBHealthCheckArgs) validate() error {
	if h.isEmpty() {
		return nil
	}
	return nil
}
// validate returns nil if Alias is configured correctly: the plain string (or
// string-slice) form must be valid, and so must every advanced alias entry.
func (a Alias) validate() error {
	if a.IsEmpty() {
		return nil
	}
	if err := a.StringSliceOrString.validate(); err != nil {
		return err
	}
	for i := range a.AdvancedAliases {
		if err := a.AdvancedAliases[i].validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate returns nil if AdvancedAlias is configured correctly.
// The alias string itself is required; its YAML key is "name".
func (a AdvancedAlias) validate() error {
	if a.Alias == nil {
		return &errFieldMustBeSpecified{
			missingField: "name",
		}
	}
	return nil
}

// validate is a no-op for StringSliceOrString: any string or list of strings is acceptable.
func (StringSliceOrString) validate() error {
	return nil
}
// validate returns nil if IPNet holds a parseable CIDR block (e.g. "10.0.0.0/24").
func (ip IPNet) validate() error {
	cidr := string(ip)
	if _, _, err := net.ParseCIDR(cidr); err != nil {
		return fmt.Errorf("parse IPNet %s: %w", cidr, err)
	}
	return nil
}
// validate returns nil if NetworkLoadBalancerConfiguration is configured
// correctly: the main listener and aliases must be valid, hosted zones are
// rejected (unsupported for NLBs), and every additional listener must be valid.
func (c NetworkLoadBalancerConfiguration) validate() error {
	if c.IsEmpty() {
		return nil
	}
	if err := c.Listener.validate(); err != nil {
		return err
	}
	if err := c.Aliases.validate(); err != nil {
		return fmt.Errorf(`validate "alias": %w`, err)
	}
	// Advanced aliases may carry a hosted zone for ALBs, but NLBs do not support it.
	if !c.Aliases.IsEmpty() {
		for _, advancedAlias := range c.Aliases.AdvancedAliases {
			if advancedAlias.HostedZone != nil {
				return fmt.Errorf(`"hosted_zone" is not supported for Network Load Balancer`)
			}
		}
	}
	for idx, listener := range c.AdditionalListeners {
		if err := listener.validate(); err != nil {
			return fmt.Errorf(`validate "additional_listeners[%d]": %w`, idx, err)
		}
	}
	return nil
}
// validate returns nil if NetworkLoadBalancerListener is configured correctly:
// "port" is required, must parse as a valid NLB port/protocol mapping, and the
// listener's health check must be valid.
func (c NetworkLoadBalancerListener) validate() error {
	if aws.StringValue(c.Port) == "" {
		return &errFieldMustBeSpecified{
			missingField: "port",
		}
	}
	if err := validateNLBPort(c.Port); err != nil {
		return fmt.Errorf(`validate "port": %w`, err)
	}
	if err := c.HealthCheck.validate(); err != nil {
		return fmt.Errorf(`validate "healthcheck": %w`, err)
	}
	return nil
}
// validateNLBPort ensures that an NLB port mapping string, if it carries a
// protocol suffix, names one of the protocols supported by Network Load Balancers.
// Protocol comparison is case-insensitive.
func validateNLBPort(port *string) error {
	_, protocol, err := ParsePortMapping(port)
	if err != nil {
		return err
	}
	if protocol == nil {
		// No protocol suffix: nothing further to check.
		return nil
	}
	name := aws.StringValue(protocol)
	for _, valid := range nlbValidProtocols {
		if strings.EqualFold(name, valid) {
			return nil
		}
	}
	return fmt.Errorf(`invalid protocol %s; valid protocols include %s`, name, english.WordSeries(nlbValidProtocols, "and"))
}
// validate returns nil if TaskConfig is configured correctly: platform, count,
// exec, and storage settings must each be valid, every variable and secret must
// be valid, and the env file (if any) must use the expected extension.
func (t TaskConfig) validate() error {
	var err error
	if err = t.Platform.validate(); err != nil {
		return fmt.Errorf(`validate "platform": %w`, err)
	}
	if err = t.Count.validate(); err != nil {
		return fmt.Errorf(`validate "count": %w`, err)
	}
	if err = t.ExecuteCommand.validate(); err != nil {
		return fmt.Errorf(`validate "exec": %w`, err)
	}
	if err = t.Storage.validate(); err != nil {
		return fmt.Errorf(`validate "storage": %w`, err)
	}
	for n, v := range t.Variables {
		if err := v.validate(); err != nil {
			return fmt.Errorf(`validate %q "variables": %w`, n, err)
		}
	}
	for _, v := range t.Secrets {
		if err := v.validate(); err != nil {
			return fmt.Errorf(`validate "secret": %w`, err)
		}
	}
	// Env files must carry the conventional extension (envFileExt) to be recognized.
	if t.EnvFile != nil {
		envFile := aws.StringValue(t.EnvFile)
		if filepath.Ext(envFile) != envFileExt {
			return fmt.Errorf("environment file %s must have a %s file extension", envFile, envFileExt)
		}
	}
	return nil
}
// validate returns nil if PlatformArgsOrString is configured correctly.
// The advanced args form takes precedence over the short string form.
func (p PlatformArgsOrString) validate() error {
	switch {
	case p.IsEmpty():
		return nil
	case !p.PlatformArgs.isEmpty():
		return p.PlatformArgs.validate()
	case p.PlatformString != nil:
		return p.PlatformString.validate()
	default:
		return nil
	}
}
// validate returns nil if PlatformArgs is configured correctly: "osfamily" and
// "architecture" must be set together, and the pair must match one of the
// supported advanced platforms (compared case-insensitively).
func (p PlatformArgs) validate() error {
	if !p.bothSpecified() {
		return errors.New(`fields "osfamily" and "architecture" must either both be specified or both be empty`)
	}
	// Build a human-readable list of valid pairs for the error message.
	var ss []string
	for _, p := range validAdvancedPlatforms {
		ss = append(ss, p.String())
	}
	prettyValidPlatforms := strings.Join(ss, ", ")
	os := strings.ToLower(aws.StringValue(p.OSFamily))
	arch := strings.ToLower(aws.StringValue(p.Arch))
	for _, vap := range validAdvancedPlatforms {
		if os == aws.StringValue(vap.OSFamily) && arch == aws.StringValue(vap.Arch) {
			return nil
		}
	}
	return fmt.Errorf("platform pair %s is invalid: fields ('osfamily', 'architecture') must be one of %s", p.String(), prettyValidPlatforms)
}

// validate returns nil if PlatformString is configured correctly: it must be
// in "[OS]/[Arch]" form and match one of the supported short platforms
// (compared case-insensitively).
func (p PlatformString) validate() error {
	args := strings.Split(string(p), "/")
	if len(args) != 2 {
		return fmt.Errorf("platform '%s' must be in the format [OS]/[Arch]", string(p))
	}
	for _, validPlatform := range validShortPlatforms {
		if strings.ToLower(string(p)) == validPlatform {
			return nil
		}
	}
	return fmt.Errorf("platform '%s' is invalid; %s: %s", p, english.PluralWord(len(validShortPlatforms), "the valid platform is", "valid platforms are"), english.WordSeries(validShortPlatforms, "and"))
}
// validate returns nil if Count is configured correctly.
// A plain integer count needs no checks; only the advanced form is validated.
func (c Count) validate() error {
	return c.AdvancedCount.validate()
}
// validate returns nil if AdvancedCount is configured correctly. It enforces,
// in order: the workload type supports autoscaling at all; no fields invalid
// for the workload type are set; "spot" is not combined with autoscaling;
// "range" is valid and is present exactly when scaling fields are; "cooldown"
// only appears alongside scaling fields; and each scaling metric is valid.
func (a AdvancedCount) validate() error {
	if a.IsEmpty() {
		return nil
	}
	// Workloads with no valid scaling fields (e.g. one-off jobs) cannot autoscale.
	if len(a.validScalingFields()) == 0 {
		return fmt.Errorf("cannot have autoscaling options for workloads of type '%s'", a.workloadType)
	}
	// validate if incorrect autoscaling fields are set
	if fields := a.getInvalidFieldsSet(); fields != nil {
		return &errInvalidAutoscalingFieldsWithWkldType{
			invalidFields: fields,
			workloadType:  a.workloadType,
		}
	}
	// validate spot and remaining autoscaling fields.
	if a.Spot != nil && a.hasAutoscaling() {
		return &errFieldMutualExclusive{
			firstField:  "spot",
			secondField: fmt.Sprintf("range/%s", strings.Join(a.validScalingFields(), "/")),
		}
	}
	if err := a.Range.validate(); err != nil {
		return fmt.Errorf(`validate "range": %w`, err)
	}
	// validate combinations with "range": scaling fields and "range" must appear together.
	if a.Range.IsEmpty() && a.hasScalingFieldsSet() {
		return &errFieldMustBeSpecified{
			missingField:      "range",
			conditionalFields: a.validScalingFields(),
		}
	}
	if !a.Range.IsEmpty() && !a.hasScalingFieldsSet() {
		return &errAtLeastOneFieldMustBeSpecified{
			missingFields:    a.validScalingFields(),
			conditionalField: "range",
		}
	}
	// validate combinations with cooldown: a cooldown is meaningless without a scaling metric.
	if !a.Cooldown.IsEmpty() && !a.hasScalingFieldsSet() {
		return &errAtLeastOneFieldMustBeSpecified{
			missingFields:    a.validScalingFields(),
			conditionalField: "cooldown",
		}
	}
	// validate individual custom autoscaling options.
	if err := a.QueueScaling.validate(); err != nil {
		return fmt.Errorf(`validate "queue_delay": %w`, err)
	}
	if err := a.CPU.validate(); err != nil {
		return fmt.Errorf(`validate "cpu_percentage": %w`, err)
	}
	if err := a.Memory.validate(); err != nil {
		return fmt.Errorf(`validate "memory_percentage": %w`, err)
	}
	return nil
}
// validate returns nil if Percentage lies in the inclusive range [0, 100].
func (p Percentage) validate() error {
	v := int(p)
	if 0 <= v && v <= 100 {
		return nil
	}
	return fmt.Errorf("percentage value %v must be an integer from 0 to 100", v)
}
// validate returns nil if ScalingConfigOrT is configured correctly.
// When the plain value form is set, only Percentage values carry constraints;
// other type parameters are accepted as-is. Otherwise the advanced scaling
// config is validated.
func (r ScalingConfigOrT[_]) validate() error {
	if r.IsEmpty() {
		return nil
	}
	if r.Value != nil {
		// The type parameter is only known at runtime here, so a type switch
		// through `any` selects the Percentage-specific validation.
		switch any(r.Value).(type) {
		case *Percentage:
			return any(r.Value).(*Percentage).validate()
		default:
			return nil
		}
	}
	return r.ScalingConfig.validate()
}

// validate returns nil if AdvancedScalingConfig is configured correctly:
// a Percentage value (if that is the type parameter) must be in range, and the
// cooldown must be valid.
func (r AdvancedScalingConfig[_]) validate() error {
	if r.IsEmpty() {
		return nil
	}
	switch any(r.Value).(type) {
	case *Percentage:
		if err := any(r.Value).(*Percentage).validate(); err != nil {
			return err
		}
	}
	return r.Cooldown.validate()
}

// validate is a no-op for Cooldown: any duration values are acceptable.
func (c Cooldown) validate() error {
	return nil
}
// validate returns nil if QueueScaling is configured correctly:
// "acceptable_latency" and "msg_processing_time" must be set together, both
// must be non-zero, processing time must not exceed the acceptable latency,
// and the cooldown must be valid.
func (qs QueueScaling) validate() error {
	if qs.IsEmpty() {
		return nil
	}
	if qs.AcceptableLatency == nil && qs.AvgProcessingTime != nil {
		return &errFieldMustBeSpecified{
			missingField:      "acceptable_latency",
			conditionalFields: []string{"msg_processing_time"},
		}
	}
	if qs.AvgProcessingTime == nil && qs.AcceptableLatency != nil {
		return &errFieldMustBeSpecified{
			missingField:      "msg_processing_time",
			conditionalFields: []string{"acceptable_latency"},
		}
	}
	// Both pointers are non-nil past this point, so dereferencing is safe.
	latency, process := *qs.AcceptableLatency, *qs.AvgProcessingTime
	if latency == 0 {
		return errors.New(`"acceptable_latency" cannot be 0`)
	}
	if process == 0 {
		return errors.New(`"msg_processing_time" cannot be 0`)
	}
	// The backlog-per-task math requires processing time <= latency.
	if process > latency {
		return errors.New(`"msg_processing_time" cannot be longer than "acceptable_latency"`)
	}
	return qs.Cooldown.validate()
}
// validate returns nil if Range is configured correctly.
// The advanced config form takes precedence over the "min-max" string form.
func (r Range) validate() error {
	if r.IsEmpty() {
		return nil
	}
	if !r.RangeConfig.IsEmpty() {
		return r.RangeConfig.validate()
	}
	return r.Value.validate()
}

// errInvalidRange is returned when a range string does not match the expected
// "${min}-${max}" format.
type errInvalidRange struct {
	value       string // the user-provided range string.
	validFormat string // the format the value should have matched.
}

// Error implements the error interface.
func (e *errInvalidRange) Error() string {
	return fmt.Sprintf("invalid range value %s: valid format is %s", e.value, e.validFormat)
}
// validate returns nil if IntRangeBand is configured correctly: the string
// must match "${min}-${max}" and min must not exceed max.
func (r IntRangeBand) validate() error {
	str := string(r)
	minMax := intRangeBandRegexp.FindStringSubmatch(str)
	// Valid minMax example: ["1-2", "1", "2"]
	if len(minMax) != 3 {
		return &errInvalidRange{
			value:       str,
			validFormat: "${min}-${max}",
		}
	}
	// Guaranteed by intRangeBandRegexp: both submatches are digit runs, so
	// Atoi only fails on overflow.
	min, err := strconv.Atoi(minMax[1])
	if err != nil {
		return err
	}
	max, err := strconv.Atoi(minMax[2])
	if err != nil {
		return err
	}
	if min <= max {
		return nil
	}
	return &errMinGreaterThanMax{
		min: min,
		max: max,
	}
}
// validate returns nil if RangeConfig is configured correctly: "min" and "max"
// are both required, no bound (including "spot_from") may be negative, and min
// must not exceed max.
func (r RangeConfig) validate() error {
	if r.Min == nil || r.Max == nil {
		return &errFieldMustBeSpecified{
			missingField: "min/max",
		}
	}
	min, max, spotFrom := aws.IntValue(r.Min), aws.IntValue(r.Max), aws.IntValue(r.SpotFrom)
	if min < 0 || max < 0 || spotFrom < 0 {
		return &errRangeValueLessThanZero{
			min:      min,
			max:      max,
			spotFrom: spotFrom,
		}
	}
	if min <= max {
		return nil
	}
	return &errMinGreaterThanMax{
		min: min,
		max: max,
	}
}
// validate returns nil if ExecuteCommand is configured correctly.
// Only the advanced config form carries fields to validate.
func (e ExecuteCommand) validate() error {
	if !e.Config.IsEmpty() {
		return e.Config.validate()
	}
	return nil
}

// validate returns nil if ExecuteCommandConfig is configured correctly.
// All fields are currently optional, so there is nothing to check.
func (ExecuteCommandConfig) validate() error {
	return nil
}
// validate returns nil if Storage is configured correctly: the ephemeral
// storage size (if set) must lie within the allowed bounds, every volume must
// be valid, and at most one volume may use Copilot-managed EFS.
func (s Storage) validate() error {
	if s.IsEmpty() {
		return nil
	}
	if s.Ephemeral != nil {
		ephemeral := aws.IntValue(s.Ephemeral)
		if ephemeral < ephemeralMinValueGiB || ephemeral > ephemeralMaxValueGiB {
			// Derive the bounds in the message from the same constants used in
			// the check, so the two cannot drift apart.
			return fmt.Errorf(`validate "ephemeral": ephemeral storage must be between %d GiB and %d GiB`, ephemeralMinValueGiB, ephemeralMaxValueGiB)
		}
	}
	var hasManagedVolume bool
	for k, v := range s.Volumes {
		if err := v.validate(); err != nil {
			return fmt.Errorf(`validate "volumes[%s]": %w`, k, err)
		}
		// Only one managed EFS file system is provisioned per service.
		if !v.EmptyVolume() && v.EFS.UseManagedFS() {
			if hasManagedVolume {
				return fmt.Errorf("cannot specify more than one managed volume per service")
			}
			hasManagedVolume = true
		}
	}
	return nil
}
// validate returns nil if Volume is configured correctly: the EFS settings and
// the mount point options must both be valid.
func (v Volume) validate() error {
	if err := v.EFS.validate(); err != nil {
		return fmt.Errorf(`validate "efs": %w`, err)
	}
	return v.MountPointOpts.validate()
}

// validate returns nil if MountPointOpts is configured correctly: a container
// "path" is required and must be a valid volume path.
func (m MountPointOpts) validate() error {
	path := aws.StringValue(m.ContainerPath)
	if path == "" {
		return &errFieldMustBeSpecified{
			missingField: "path",
		}
	}
	if err := validateVolumePath(path); err != nil {
		return fmt.Errorf(`validate "path": %w`, err)
	}
	return nil
}
// validate returns nil if EFSConfigOrBool is configured correctly.
// Only the advanced config form carries fields to validate.
func (e EFSConfigOrBool) validate() error {
	if !e.IsEmpty() {
		return e.Advanced.validate()
	}
	return nil
}
// validate returns nil if EFSVolumeConfiguration is configured correctly.
// It enforces that the bring-your-own (id/root_dir/auth) and managed (uid/gid)
// configurations are mutually exclusive, that uid and gid come as a pair with
// a non-zero uid, that the auth config is valid, and that an access point is
// only used with an empty or "/" root directory and IAM auth enabled.
func (e EFSVolumeConfiguration) validate() error {
	if e.IsEmpty() {
		return nil
	}
	if !e.EmptyBYOConfig() && !e.EmptyUIDConfig() {
		return &errFieldMutualExclusive{
			firstField:  "uid/gid",
			secondField: "id/root_dir/auth",
		}
	}
	if e.UID != nil && e.GID == nil {
		return &errFieldMustBeSpecified{
			missingField:      "gid",
			conditionalFields: []string{"uid"},
		}
	}
	if e.UID == nil && e.GID != nil {
		return &errFieldMustBeSpecified{
			missingField:      "uid",
			conditionalFields: []string{"gid"},
		}
	}
	// uid 0 is root; disallow it for managed EFS access points.
	if e.UID != nil && *e.UID == 0 {
		return fmt.Errorf(`"uid" must not be 0`)
	}
	if err := e.AuthConfig.validate(); err != nil {
		return fmt.Errorf(`validate "auth": %w`, err)
	}
	// When an access point is used, EFS resolves paths relative to the access
	// point, so a custom root_dir is meaningless and IAM auth is required.
	if e.AuthConfig.AccessPointID != nil {
		if (aws.StringValue(e.RootDirectory) == "" || aws.StringValue(e.RootDirectory) == "/") &&
			(e.AuthConfig.IAM == nil || aws.BoolValue(e.AuthConfig.IAM)) {
			return nil
		}
		return fmt.Errorf(`"root_dir" must be either empty or "/" and "auth.iam" must be true when "access_point_id" is used`)
	}
	if e.RootDirectory != nil {
		if err := validateVolumePath(aws.StringValue(e.RootDirectory)); err != nil {
			return fmt.Errorf(`validate "root_dir": %w`, err)
		}
	}
	return nil
}
// validate returns nil if AuthorizationConfig is configured correctly.
// NOTE(review): both branches return nil today; the IsEmpty check appears to
// be a placeholder for future field-level validation — confirm before removing.
func (a AuthorizationConfig) validate() error {
	if a.IsEmpty() {
		return nil
	}
	return nil
}

// validate returns nil if Logging is configured correctly: the env file for
// the logging sidecar (if any) must use the expected extension.
func (l Logging) validate() error {
	if l.IsEmpty() {
		return nil
	}
	if l.EnvFile != nil {
		envFile := aws.StringValue(l.EnvFile)
		if filepath.Ext(envFile) != envFileExt {
			return fmt.Errorf("environment file %s must have a %s file extension", envFile, envFileExt)
		}
	}
	return nil
}
// validate returns nil if SidecarConfig is configured correctly: mount points,
// the port's protocol (if any), the health check, container dependencies, the
// build config, the env file extension, and image overrides must all be valid.
func (s SidecarConfig) validate() error {
	for ind, mp := range s.MountPoints {
		if err := mp.validate(); err != nil {
			return fmt.Errorf(`validate "mount_points[%d]": %w`, ind, err)
		}
	}
	// The port string may carry a "/protocol" suffix; only supported container
	// protocols are allowed (compared case-insensitively).
	_, protocol, err := ParsePortMapping(s.Port)
	if err != nil {
		return err
	}
	if protocol != nil {
		protocolVal := aws.StringValue(protocol)
		var isValidProtocol bool
		for _, valid := range validContainerProtocols {
			if strings.EqualFold(protocolVal, valid) {
				isValidProtocol = true
				break
			}
		}
		if !isValidProtocol {
			return fmt.Errorf(`invalid protocol %s; valid protocols include %s`, protocolVal, english.WordSeries(validContainerProtocols, "and"))
		}
	}
	if err := s.HealthCheck.validate(); err != nil {
		return fmt.Errorf(`validate "healthcheck": %w`, err)
	}
	if err := s.DependsOn.validate(); err != nil {
		return fmt.Errorf(`validate "depends_on": %w`, err)
	}
	if err := s.Image.Advanced.validate(); err != nil {
		return fmt.Errorf(`validate "build": %w`, err)
	}
	if s.EnvFile != nil {
		envFile := aws.StringValue(s.EnvFile)
		if filepath.Ext(envFile) != envFileExt {
			return fmt.Errorf("environment file %s must have a %s file extension", envFile, envFileExt)
		}
	}
	return s.ImageOverride.validate()
}
// validate returns nil if SidecarMountPoint is configured correctly:
// "source_volume" is required, and the shared mount point options must be valid.
func (s SidecarMountPoint) validate() error {
	if aws.StringValue(s.SourceVolume) != "" {
		return s.MountPointOpts.validate()
	}
	return &errFieldMustBeSpecified{
		missingField: "source_volume",
	}
}
// validate returns nil if NetworkConfig is configured correctly: both the VPC
// settings and the Service Connect settings must be valid.
func (n NetworkConfig) validate() error {
	if n.IsEmpty() {
		return nil
	}
	if err := n.VPC.validate(); err != nil {
		return fmt.Errorf(`validate "vpc": %w`, err)
	}
	if err := n.Connect.validate(); err != nil {
		return fmt.Errorf(`validate "connect": %w`, err)
	}
	return nil
}

// validate returns nil if ServiceConnectBoolOrArgs is configured correctly.
// Only the args form carries fields, so validation is delegated to it.
func (s ServiceConnectBoolOrArgs) validate() error {
	return s.ServiceConnectArgs.validate()
}

// validate is a no-op for ServiceConnectArgs: any alias value is acceptable.
func (ServiceConnectArgs) validate() error {
	return nil
}
// validate returns nil if RequestDrivenWebServiceNetworkConfig is configured
// correctly: the VPC settings must be valid.
func (n RequestDrivenWebServiceNetworkConfig) validate() error {
	if n.IsEmpty() {
		return nil
	}
	if err := n.VPC.validate(); err != nil {
		return fmt.Errorf(`validate "vpc": %w`, err)
	}
	return nil
}

// validate returns nil if rdwsVpcConfig is configured correctly: the subnet
// placement must be valid.
func (v rdwsVpcConfig) validate() error {
	if v.isEmpty() {
		return nil
	}
	if err := v.Placement.validate(); err != nil {
		return fmt.Errorf(`validate "placement": %w`, err)
	}
	return nil
}
// validate returns nil if vpcConfig is configured correctly: the subnet
// placement and the security group settings must both be valid.
func (v vpcConfig) validate() error {
	if v.isEmpty() {
		return nil
	}
	if err := v.Placement.validate(); err != nil {
		return fmt.Errorf(`validate "placement": %w`, err)
	}
	if err := v.SecurityGroups.validate(); err != nil {
		return fmt.Errorf(`validate "security_groups": %w`, err)
	}
	return nil
}
// validate returns nil if PlacementArgOrString is configured correctly.
// The short string form takes precedence over the args form.
func (p PlacementArgOrString) validate() error {
	if p.IsEmpty() {
		return nil
	}
	if p.PlacementString == nil {
		return p.PlacementArgs.validate()
	}
	return p.PlacementString.validate()
}
// validate returns nil if PlacementArgs is configured correctly.
// Only the subnets field carries validation rules.
func (p PlacementArgs) validate() error {
	if p.Subnets.isEmpty() {
		return nil
	}
	return p.Subnets.validate()
}
// validate returns nil if SubnetArgs is configured correctly.
// Only the tag-based subnet selection carries validation rules.
func (s SubnetArgs) validate() error {
	if !s.isEmpty() {
		return s.FromTags.validate()
	}
	return nil
}
// validate returns nil if Tags is configured correctly: every tag value must
// be valid. The first invalid value stops iteration.
func (t Tags) validate() error {
	for key := range t {
		if err := t[key].validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate returns nil if PlacementString is configured correctly: it must be
// non-empty and match one of the allowed subnet placements exactly.
func (p PlacementString) validate() error {
	value := string(p)
	if value == "" {
		return fmt.Errorf(`"placement" cannot be empty`)
	}
	for _, allowed := range subnetPlacements {
		if value == allowed {
			return nil
		}
	}
	return fmt.Errorf(`"placement" %s must be one of %s`, value, strings.Join(subnetPlacements, ", "))
}
// validate returns nil if SecurityGroupsIDsOrConfig is configured correctly.
// Plain ID lists need no checks; the advanced config form is delegated to.
func (s SecurityGroupsIDsOrConfig) validate() error {
	if s.isEmpty() {
		return nil
	}
	return s.AdvancedConfig.validate()
}

// validate is a no-op for SecurityGroupsConfig: all fields are free-form.
func (SecurityGroupsConfig) validate() error {
	return nil
}
// validate returns nil if AppRunnerInstanceConfig is configured correctly.
// App Runner does not support Windows images or ARM architectures, so a
// specified platform must be Linux on an x86-64 architecture.
func (r AppRunnerInstanceConfig) validate() error {
	if err := r.Platform.validate(); err != nil {
		return fmt.Errorf(`validate "platform": %w`, err)
	}
	// Error out if user added Windows as platform in manifest.
	if isWindowsPlatform(r.Platform) {
		return ErrAppRunnerInvalidPlatformWindows
	}
	// This extra check is because ARM architectures won't work for App Runner services.
	// Bug fix: the original condition used "||" (arch != AMD64 || arch != X86),
	// which is true for every architecture since no value equals both constants,
	// so even valid x86-64 platforms were rejected. The architecture is invalid
	// only when it matches neither constant.
	if !r.Platform.IsEmpty() {
		if arch := r.Platform.Arch(); arch != ArchAMD64 && arch != ArchX86 {
			return fmt.Errorf("App Runner services can only build on %s and %s architectures", ArchAMD64, ArchX86)
		}
	}
	return nil
}
// validate returns nil if RequestDrivenWebServiceHttpConfig is configured
// correctly: the health check settings and the private endpoint settings must
// both be valid.
func (r RequestDrivenWebServiceHttpConfig) validate() error {
	if err := r.HealthCheckConfiguration.validate(); err != nil {
		return err
	}
	return r.Private.validate()
}

// validate is a no-op for VPCEndpoint: any endpoint value is acceptable.
func (v VPCEndpoint) validate() error {
	return nil
}
// validate returns nil if Observability is configured correctly: the tracing
// vendor, if set, must be one of the supported vendors (compared
// case-insensitively).
func (o Observability) validate() error {
	if o.isEmpty() {
		return nil
	}
	for _, validVendor := range tracingValidVendors {
		if strings.EqualFold(aws.StringValue(o.Tracing), validVendor) {
			return nil
		}
	}
	return fmt.Errorf("invalid tracing vendor %s: %s %s",
		aws.StringValue(o.Tracing),
		english.PluralWord(len(tracingValidVendors), "the valid vendor is", "valid vendors are"),
		english.WordSeries(tracingValidVendors, "and"))
}
// validate returns nil if JobTriggerConfig is configured correctly:
// "schedule" is required.
func (c JobTriggerConfig) validate() error {
	if c.Schedule == nil {
		return &errFieldMustBeSpecified{
			missingField: "schedule",
		}
	}
	return nil
}

// validate returns nil if JobFailureHandlerConfig is configured correctly.
// All fields are currently optional, so there is nothing to check.
func (JobFailureHandlerConfig) validate() error {
	return nil
}
// validate returns nil if PublishConfig is configured correctly: every SNS
// topic in the publish list must be valid.
func (p PublishConfig) validate() error {
	for idx := range p.Topics {
		if err := p.Topics[idx].validate(); err != nil {
			return fmt.Errorf(`validate "topics[%d]": %w`, idx, err)
		}
	}
	return nil
}
// validate returns nil if Topic is configured correctly: the topic name must
// be a valid pub/sub name, and the FIFO settings must be valid.
func (t Topic) validate() error {
	if err := validatePubSubName(aws.StringValue(t.Name)); err != nil {
		return err
	}
	return t.FIFO.validate()
}

// validate returns nil if FIFOTopicAdvanceConfigOrBool is configured correctly.
// Only the advanced config form carries fields to validate.
func (f FIFOTopicAdvanceConfigOrBool) validate() error {
	if f.IsEmpty() {
		return nil
	}
	return f.Advanced.validate()
}

// validate returns nil if FIFOTopicAdvanceConfig is configured correctly.
// All fields are currently optional, so there is nothing to check.
func (a FIFOTopicAdvanceConfig) validate() error {
	return nil
}
// validate returns nil if SubscribeConfig is configured correctly: every topic
// subscription and the service-level queue must be valid.
func (s SubscribeConfig) validate() error {
	if s.IsEmpty() {
		return nil
	}
	for ind, topic := range s.Topics {
		if err := topic.validate(); err != nil {
			return fmt.Errorf(`validate "topics[%d]": %w`, ind, err)
		}
	}
	if err := s.Queue.validate(); err != nil {
		return fmt.Errorf(`validate "queue": %w`, err)
	}
	return nil
}
// validate returns nil if TopicSubscription is configured correctly: the topic
// name must be a valid pub/sub name, the publishing "service" is required and
// must be a well-formed service name, and the per-topic queue must be valid.
func (t TopicSubscription) validate() error {
	if err := validatePubSubName(aws.StringValue(t.Name)); err != nil {
		return err
	}
	svcName := aws.StringValue(t.Service)
	if svcName == "" {
		return &errFieldMustBeSpecified{
			missingField: "service",
		}
	}
	if !isValidSubSvcName(svcName) {
		return fmt.Errorf("service name must start with a letter, contain only lower-case letters, numbers, and hyphens, and have no consecutive or trailing hyphen")
	}
	if err := t.Queue.validate(); err != nil {
		return fmt.Errorf(`validate "queue": %w`, err)
	}
	return nil
}
// validate returns nil if SQSQueueOrBool is configured correctly.
// Only the advanced config form carries fields to validate.
func (q SQSQueueOrBool) validate() error {
	if q.IsEmpty() {
		return nil
	}
	return q.Advanced.validate()
}
// validate returns nil if SQSQueue is configured correctly: the dead-letter
// queue settings and the FIFO settings must both be valid.
func (q SQSQueue) validate() error {
	if q.IsEmpty() {
		return nil
	}
	err := q.DeadLetter.validate()
	if err != nil {
		return fmt.Errorf(`validate "dead_letter": %w`, err)
	}
	return q.FIFO.validate()
}
// validate returns nil if FIFOAdvanceConfig is configured correctly: the
// high-throughput shorthand, deduplication scope, and throughput limit must
// each be valid, and a per-message-group throughput limit cannot be combined
// with queue-level deduplication (SQS requires per-group deduplication for it).
func (q FIFOAdvanceConfig) validate() error {
	if q.IsEmpty() {
		return nil
	}
	if err := q.validateHighThroughputFIFO(); err != nil {
		return err
	}
	if err := q.validateDeduplicationScope(); err != nil {
		return err
	}
	if err := q.validateFIFOThroughputLimit(); err != nil {
		return err
	}
	if aws.StringValue(q.FIFOThroughputLimit) == sqsFIFOThroughputLimitPerMessageGroupID && aws.StringValue(q.DeduplicationScope) == sqsDeduplicationScopeQueue {
		return fmt.Errorf(`"throughput_limit" must be set to "perQueue" when "deduplication_scope" is set to "queue"`)
	}
	return nil
}
// validate returns nil if FIFOAdvanceConfigOrBool is configured correctly.
// Only the advanced config form carries fields to validate.
func (q FIFOAdvanceConfigOrBool) validate() error {
	if q.IsEmpty() {
		return nil
	}
	return q.Advanced.validate()
}
// validateHighThroughputFIFO ensures the "high_throughput" shorthand is not
// combined with the explicit "throughput_limit" or "deduplication_scope"
// fields, since the shorthand sets both of them implicitly.
func (q FIFOAdvanceConfig) validateHighThroughputFIFO() error {
	if q.HighThroughputFifo == nil {
		return nil
	}
	if q.FIFOThroughputLimit != nil {
		return &errFieldMutualExclusive{
			firstField:  "high_throughput",
			secondField: "throughput_limit",
			mustExist:   false,
		}
	}
	if q.DeduplicationScope != nil {
		return &errFieldMutualExclusive{
			firstField:  "high_throughput",
			secondField: "deduplication_scope",
			mustExist:   false,
		}
	}
	return nil
}
// validateDeduplicationScope ensures "deduplication_scope", if set, is one of
// the values accepted by SQS.
func (q FIFOAdvanceConfig) validateDeduplicationScope() error {
	if q.DeduplicationScope != nil && !contains(aws.StringValue(q.DeduplicationScope), validSQSDeduplicationScopeValues) {
		return fmt.Errorf(`validate "deduplication_scope": deduplication scope value must be one of %s`, english.WordSeries(validSQSDeduplicationScopeValues, "or"))
	}
	return nil
}

// validateFIFOThroughputLimit ensures "throughput_limit", if set, is one of
// the values accepted by SQS.
func (q FIFOAdvanceConfig) validateFIFOThroughputLimit() error {
	if q.FIFOThroughputLimit != nil && !contains(aws.StringValue(q.FIFOThroughputLimit), validSQSFIFOThroughputLimitValues) {
		return fmt.Errorf(`validate "throughput_limit": fifo throughput limit value must be one of %s`, english.WordSeries(validSQSFIFOThroughputLimitValues, "or"))
	}
	return nil
}
// validate returns nil if DeadLetterQueue is configured correctly.
// NOTE(review): both branches return nil today; the IsEmpty check appears to
// be a placeholder for future field-level validation — confirm before removing.
func (d DeadLetterQueue) validate() error {
	if d.IsEmpty() {
		return nil
	}
	return nil
}
// validate returns nil if OverrideRule is configured correctly: the override
// path must not match any of the task-definition paths that Copilot manages
// itself.
// NOTE(review): the patterns are recompiled on every call; if this ever shows
// up in a profile, the anchored regexps could be precompiled at package level.
func (r OverrideRule) validate() error {
	for _, s := range invalidTaskDefOverridePathRegexp {
		// Anchor the pattern so only full-path matches are rejected.
		re := regexp.MustCompile(fmt.Sprintf(`^%s$`, s))
		if re.MatchString(r.Path) {
			return fmt.Errorf(`"%s" cannot be overridden with a custom value`, s)
		}
	}
	return nil
}
// validate returns nil if Variable is configured correctly: the optional
// CloudFormation import must be valid.
func (v Variable) validate() error {
	if err := v.FromCFN.validate(); err != nil {
		return fmt.Errorf(`validate "from_cfn": %w`, err)
	}
	return nil
}

// validate returns nil if stringOrFromCFN is configured correctly.
// Plain string values need no checks; only the CloudFormation import form is
// validated.
func (s stringOrFromCFN) validate() error {
	if s.isEmpty() {
		return nil
	}
	return s.FromCFN.validate()
}
// validate returns nil if fromCFN is configured correctly: the exported
// CloudFormation name, when the import is present, must be non-empty.
func (cfg fromCFN) validate() error {
	if cfg.isEmpty() {
		return nil
	}
	if aws.StringValue(cfg.Name) == "" {
		return errors.New("name cannot be an empty string")
	}
	return nil
}
// validate is a no-op for Secret: any secret reference is acceptable.
func (s Secret) validate() error {
	return nil
}
// validateExposedPortsOpts gathers the inputs needed to check that no two
// containers in a workload expose the same port.
type validateExposedPortsOpts struct {
	mainContainerName string                    // name of the workload's main container.
	mainContainerPort *uint16                   // port exposed by the main container, if any.
	alb               *HTTP                     // ALB routing configuration, if any.
	nlb               *NetworkLoadBalancerConfiguration // NLB configuration, if any.
	sidecarConfig     map[string]*SidecarConfig // sidecar configurations keyed by container name.
}

// validateDependenciesOpts gathers the inputs needed to validate container
// startup dependencies across the main container, sidecars, and log router.
type validateDependenciesOpts struct {
	mainContainerName string
	sidecarConfig     map[string]*SidecarConfig
	imageConfig       Image
	logging           Logging
}

// containerDependency describes one container's dependency edges and whether
// the container is essential to the task.
type containerDependency struct {
	dependsOn   DependsOn
	isEssential bool
}

// validateTargetContainerOpts gathers the inputs needed to check that a
// routing rule's target container exists and exposes a port.
type validateTargetContainerOpts struct {
	mainContainerName string
	mainContainerPort *uint16
	targetContainer   *string
	sidecarConfig     map[string]*SidecarConfig
}

// validateWindowsOpts gathers the Windows-incompatible settings to check.
type validateWindowsOpts struct {
	readOnlyFS *bool
	efsVolumes map[string]*Volume
}

// validateARMOpts gathers the spot-related settings to check against ARM,
// since Fargate Spot is unavailable on ARM.
type validateARMOpts struct {
	Spot     *int
	SpotFrom *int
}
// validateTargetContainer checks that the routing rule's target container,
// when specified, exists in the workload and exposes a port.
func validateTargetContainer(opts validateTargetContainerOpts) error {
	if opts.targetContainer == nil {
		return nil
	}
	name := aws.StringValue(opts.targetContainer)
	// The main container is a valid target as long as it exposes a port.
	if name == opts.mainContainerName {
		if opts.mainContainerPort != nil {
			return nil
		}
		return fmt.Errorf("target container %q doesn't expose a port", name)
	}
	// Otherwise the target must be a declared sidecar with a port.
	sidecar, ok := opts.sidecarConfig[name]
	if !ok {
		return fmt.Errorf("target container %q doesn't exist", name)
	}
	if sidecar.Port == nil {
		return fmt.Errorf("target container %q doesn't expose a port", name)
	}
	return nil
}
// validateContainerDeps builds the dependency graph over the main container,
// the Firelens log router, and all sidecars, then checks that dependencies on
// essential containers use allowed statuses and that the graph has no cycles.
func validateContainerDeps(opts validateDependenciesOpts) error {
	containerDependencies := make(map[string]containerDependency)
	// The main container is always essential.
	containerDependencies[opts.mainContainerName] = containerDependency{
		dependsOn:   opts.imageConfig.DependsOn,
		isEssential: true,
	}
	// A configured log router adds an implicit Firelens container with no dependencies.
	if !opts.logging.IsEmpty() {
		containerDependencies[FirelensContainerName] = containerDependency{}
	}
	// Sidecars default to essential unless explicitly marked otherwise.
	for name, config := range opts.sidecarConfig {
		containerDependencies[name] = containerDependency{
			dependsOn:   config.DependsOn,
			isEssential: config.Essential == nil || aws.BoolValue(config.Essential),
		}
	}
	if err := validateDepsForEssentialContainers(containerDependencies); err != nil {
		return err
	}
	return validateNoCircularDependencies(containerDependencies)
}
// validateDepsForEssentialContainers checks every dependency edge that points
// at an essential container: such dependencies may only use statuses that are
// compatible with a container that must keep running. Statuses are compared
// upper-cased.
func validateDepsForEssentialContainers(deps map[string]containerDependency) error {
	for name, containerDep := range deps {
		for dep, status := range containerDep.dependsOn {
			// Dependencies on non-essential containers are unconstrained.
			if !deps[dep].isEssential {
				continue
			}
			if err := validateEssentialContainerDependency(dep, strings.ToUpper(status)); err != nil {
				return fmt.Errorf("validate %s container dependencies status: %w", name, err)
			}
		}
	}
	return nil
}
// validateExposedPorts verifies that no two containers in the workload expose
// the same port, accumulating a port-to-container map across the main
// container, sidecars, ALB target ports, and NLB listener ports in turn.
func validateExposedPorts(opts validateExposedPortsOpts) error {
	containerNameFor := make(map[uint16]string)
	populateMainContainerPort(containerNameFor, opts)
	steps := []func(map[uint16]string, validateExposedPortsOpts) error{
		populateSidecarContainerPortsAndValidate,
		populateALBPortsAndValidate,
		populateNLBPortsAndValidate,
	}
	for _, step := range steps {
		if err := step(containerNameFor, opts); err != nil {
			return err
		}
	}
	return nil
}
// populateMainContainerPort records the main container's exposed port, if any,
// in the port-to-container map.
func populateMainContainerPort(containerNameFor map[uint16]string, opts validateExposedPortsOpts) {
	if opts.mainContainerPort != nil {
		containerNameFor[aws.Uint16Value(opts.mainContainerPort)] = opts.mainContainerName
	}
}
// populateSidecarContainerPortsAndValidate records each sidecar's exposed port
// in the port-to-container map, erroring if a port is already claimed by
// another container.
func populateSidecarContainerPortsAndValidate(containerNameFor map[uint16]string, opts validateExposedPortsOpts) error {
	for name, sidecar := range opts.sidecarConfig {
		if sidecar.Port == nil {
			continue
		}
		// Strip any "/protocol" suffix before parsing the numeric port.
		sidecarPort, _, err := ParsePortMapping(sidecar.Port)
		if err != nil {
			return err
		}
		port, err := strconv.ParseUint(aws.StringValue(sidecarPort), 10, 16)
		if err != nil {
			return err
		}
		if _, ok := containerNameFor[uint16(port)]; ok {
			return &errContainersExposingSamePort{
				firstContainer:  name,
				secondContainer: containerNameFor[uint16(port)],
				port:            uint16(port),
			}
		}
		containerNameFor[uint16(port)] = name
	}
	return nil
}
// populateALBPortsAndValidate records each ALB routing rule's target port in
// the port-to-container map. It errors when a rule's target container tries to
// expose a port that a different container already exposes — e.g. target_container
// is container x but target_port 80 is already exposed by container y.
func populateALBPortsAndValidate(containerNameFor map[uint16]string, opts validateExposedPortsOpts) error {
	if opts.alb == nil || opts.alb.IsEmpty() {
		return nil
	}
	alb := opts.alb
	for _, rule := range alb.RoutingRules() {
		if rule.TargetPort == nil {
			continue
		}
		if err := validateContainersNotExposingSamePort(containerNameFor, aws.Uint16Value(rule.TargetPort), rule.TargetContainer); err != nil {
			return err
		}
		// Without an explicit target container the rule targets the main container.
		targetContainerName := opts.mainContainerName
		if rule.TargetContainer != nil {
			targetContainerName = aws.StringValue(rule.TargetContainer)
		}
		containerNameFor[aws.Uint16Value(rule.TargetPort)] = targetContainerName
	}
	return nil
}
// validateContainersNotExposingSamePort returns an error when targetPort is
// already exposed by a container other than targetContainer.
func validateContainersNotExposingSamePort(containerNameFor map[uint16]string, targetPort uint16, targetContainer *string) error {
	owner, taken := containerNameFor[targetPort]
	if !taken || targetContainer == nil || owner == aws.StringValue(targetContainer) {
		return nil
	}
	return &errContainersExposingSamePort{
		firstContainer:  aws.StringValue(targetContainer),
		secondContainer: owner,
		port:            targetPort,
	}
}
// populateNLBPortsAndValidate records the NLB listeners' target ports,
// validating the primary listener and then every additional listener.
func populateNLBPortsAndValidate(containerNameFor map[uint16]string, opts validateExposedPortsOpts) error {
	nlb := opts.nlb
	if nlb == nil || nlb.IsEmpty() {
		return nil
	}
	if err := populateAndValidateNLBPorts(nlb.Listener, containerNameFor, opts.mainContainerName); err != nil {
		return fmt.Errorf(`validate "nlb": %w`, err)
	}
	for i, listener := range nlb.AdditionalListeners {
		if err := populateAndValidateNLBPorts(listener, containerNameFor, opts.mainContainerName); err != nil {
			return fmt.Errorf(`validate "nlb.additional_listeners[%d]": %w`, i, err)
		}
	}
	return nil
}
// populateAndValidateNLBPorts parses an NLB listener's port configuration,
// verifies that its effective target port is not already exposed by a
// different container, and records which container owns the target port.
func populateAndValidateNLBPorts(listener NetworkLoadBalancerListener, containerNameFor map[uint16]string, mainContainerName string) error {
	nlbPort, _, err := ParsePortMapping(listener.Port)
	if err != nil {
		return err
	}
	port, err := strconv.ParseUint(aws.StringValue(nlbPort), 10, 16)
	if err != nil {
		return err
	}
	// The target port defaults to the listener port unless overridden.
	targetPort := uint16(port)
	if listener.TargetPort != nil {
		targetPort = uint16(aws.IntValue(listener.TargetPort))
	}
	// Bug fix: validate the effective target port. Previously this validated
	// uint16(aws.IntValue(listener.TargetPort)), which evaluates to 0 when
	// "target_port" is unset, so collisions on the listener port were missed.
	if err = validateContainersNotExposingSamePort(containerNameFor, targetPort, listener.TargetContainer); err != nil {
		return err
	}
	targetContainer := mainContainerName
	if listener.TargetContainer != nil {
		targetContainer = aws.StringValue(listener.TargetContainer)
	}
	containerNameFor[targetPort] = targetContainer
	return nil
}
// validateEssentialContainerDependency returns nil when status is a condition
// an essential container can satisfy; otherwise it returns an error naming
// the allowed statuses.
func validateEssentialContainerDependency(name, status string) error {
	for _, valid := range essentialContainerDependsOnValidStatuses {
		if status == valid {
			return nil
		}
	}
	return fmt.Errorf("essential container %s can only have status %s", name, english.WordSeries([]string{dependsOnStart, dependsOnHealthy}, "or"))
}
// validateNoCircularDependencies builds the container dependency graph and
// rejects self-dependencies and circular dependency chains.
func validateNoCircularDependencies(deps map[string]containerDependency) error {
	g, err := buildDependencyGraph(deps)
	if err != nil {
		return err
	}
	cycle, acyclic := g.IsAcyclic()
	if acyclic {
		return nil
	}
	if len(cycle) == 1 {
		return fmt.Errorf("container %s cannot depend on itself", cycle[0])
	}
	// Sort for deterministic output; stabilizes unit tests.
	sort.SliceStable(cycle, func(i, j int) bool { return cycle[i] < cycle[j] })
	return fmt.Errorf("circular container dependency chain includes the following containers: %s", cycle)
}
// buildDependencyGraph converts the dependency map into a directed graph with
// an edge from each container to every container it depends on. It errors if
// a dependency references a container that is not defined.
func buildDependencyGraph(deps map[string]containerDependency) (*graph.Graph[string], error) {
	g := graph.New[string]()
	for from, cfg := range deps {
		for to := range cfg.dependsOn {
			if _, defined := deps[to]; !defined {
				return nil, fmt.Errorf("container %s does not exist", to)
			}
			g.Add(graph.Edge[string]{From: from, To: to})
		}
	}
	return g, nil
}
// validateVolumePath guards against command injection by accepting only
// volume paths made of the characters a-zA-Z0-9.-_/ (an empty path is
// allowed).
func validateVolumePath(input string) error {
	if input == "" {
		return nil
	}
	if !volumesPathRegexp.MatchString(input) {
		return fmt.Errorf("path can only contain the characters a-zA-Z0-9.-_/")
	}
	return nil
}
// validatePubSubName ensures a pub/sub topic name is non-empty and contains
// only letters, numbers, underscores, and hyphens.
func validatePubSubName(name string) error {
	if len(name) == 0 {
		return &errFieldMustBeSpecified{missingField: "name"}
	}
	if awsSNSTopicRegexp.MatchString(name) {
		return nil
	}
	return fmt.Errorf(`"name" can only contain letters, numbers, underscores, and hyphens`)
}
// isValidSubSvcName reports whether name is a valid subscription service
// name: it must match the AWS name pattern and may not contain consecutive
// or trailing punctuation.
func isValidSubSvcName(name string) bool {
	switch {
	case !awsNameRegexp.MatchString(name):
		return false
	case punctuationRegExp.MatchString(name):
		// Consecutive dashes or dots are not allowed.
		return false
	default:
		return !trailingPunctRegExp.MatchString(name)
	}
}
// validateWindows rejects configuration unsupported on Windows containers:
// a read-only filesystem and EFS volumes.
func validateWindows(opts validateWindowsOpts) error {
	if aws.BoolValue(opts.readOnlyFS) {
		return fmt.Errorf(`%q can not be set to 'true' when deploying a Windows container`, "readonly_fs")
	}
	for _, volume := range opts.efsVolumes {
		if volume.EmptyVolume() {
			continue
		}
		return errors.New(`'EFS' is not supported when deploying a Windows container`)
	}
	return nil
}
// validateARM rejects Fargate Spot capacity settings, which are unsupported
// on the ARM architecture.
func validateARM(opts validateARMOpts) error {
	if opts.Spot == nil && opts.SpotFrom == nil {
		return nil
	}
	return errors.New(`'Fargate Spot' is not supported when deploying on ARM architecture`)
}
// contains reports whether name appears in names.
func contains(name string, names []string) bool {
	for i := range names {
		if names[i] == name {
			return true
		}
	}
	return false
}
// validate returns nil if ImageLocationOrBuild is configured correctly:
// "build" must itself be valid, and "build" and "location" are mutually
// exclusive.
func (i ImageLocationOrBuild) validate() error {
	if err := i.Build.validate(); err != nil {
		return fmt.Errorf(`validate "build": %w`, err)
	}
	if i.Build.isEmpty() || i.Location == nil {
		return nil
	}
	return &errFieldMutualExclusive{
		firstField:  "build",
		secondField: "location",
		mustExist:   true,
	}
}
// validateConditionValuesPerRule returns an error when the rule's alias and
// allowed-source-IP values would exceed the maximum number of condition
// values a single listener rule may carry.
func (r *RoutingRule) validateConditionValuesPerRule() error {
	aliases, err := r.Alias.ToStringSlice()
	if err != nil {
		return fmt.Errorf("convert aliases to string slice: %w", err)
	}
	allowedSourceIps := make([]string, len(r.AllowedSourceIps))
	for idx, ip := range r.AllowedSourceIps {
		allowedSourceIps[idx] = string(ip)
	}
	// NOTE(review): ">=" appears to reserve one condition slot, presumably
	// for the path condition — confirm against the ALB listener-rule quota.
	if len(aliases)+len(allowedSourceIps) >= maxConditionsPerRule {
		return &errMaxConditionValuesPerRule{
			path:             aws.StringValue(r.Path),
			aliases:          aliases,
			allowedSourceIps: allowedSourceIps,
		}
	}
	return nil
}
// errMaxConditionValuesPerRule is returned when a single listener rule
// carries more condition values (aliases plus allowed source IPs) than
// permitted.
type errMaxConditionValuesPerRule struct {
	path             string   // the rule's path condition.
	aliases          []string // "alias" values configured on the rule.
	allowedSourceIps []string // "allowed_source_ips" values configured on the rule.
}
// Error implements the error interface.
func (e *errMaxConditionValuesPerRule) Error() string {
	return fmt.Sprintf("listener rule has more than five conditions %s %s", english.WordSeries(e.aliases, "and"),
		english.WordSeries(e.allowedSourceIps, "and"))
}
// RecommendActions suggests a manifest layout that splits the rule's
// condition values across "additional_rules" so each rule stays within the
// limit.
func (e *errMaxConditionValuesPerRule) RecommendActions() string {
	cgList := e.generateConditionGroups()
	var fmtListenerRules strings.Builder
	// The first group renders as the primary "http" rule.
	fmtListenerRules.WriteString(fmt.Sprintf(`http:
  path: %s
  alias: %s
  allowed_source_ips: %s
  additional_rules:`, e.path, fmtStringArray(cgList[0].aliases), fmtStringArray(cgList[0].allowedSourceIps)))
	// Remaining groups render as entries under "additional_rules".
	for i := 1; i < len(cgList); i++ {
		fmtListenerRules.WriteString(fmt.Sprintf(`
  - path: %s
    alias: %s
    allowed_source_ips: %s`, e.path, fmtStringArray(cgList[i].aliases), fmtStringArray(cgList[i].allowedSourceIps)))
	}
	return fmt.Sprintf(`You can split the "alias" and "allowed_source_ips" field into separate rules, so that each rule contains up to 5 values:
%s`, color.HighlightCodeBlock(fmtListenerRules.String()))
}
// fmtStringArray renders arr as a bracketed, comma-separated list, e.g. "[a,b]".
func fmtStringArray(arr []string) string {
	return "[" + strings.Join(arr, ",") + "]"
}
// conditionGroup represents groups of conditions per listener rule.
type conditionGroup struct {
	allowedSourceIps []string // source-IP condition values assigned to this rule.
	aliases          []string // alias condition values assigned to this rule.
}
// generateConditionGroups partitions the rule's aliases and source IPs into
// groups small enough to fit within a single listener rule's condition limit.
func (e *errMaxConditionValuesPerRule) generateConditionGroups() []conditionGroup {
	remaining := calculateRemainingConditions(e.path)
	switch {
	case len(e.aliases) != 0 && len(e.allowedSourceIps) != 0:
		return e.generateConditionsWithSourceIPsAndAlias(remaining)
	case len(e.aliases) != 0:
		return e.generateConditionsWithAliasOnly(remaining)
	default:
		return e.generateConditionWithSourceIPsOnly(remaining)
	}
}
func calculateRemainingConditions(path string) int {
rcPerRule := maxConditionsPerRule
if path != rootPath {
return rcPerRule - 2
}
return rcPerRule - 1
}
// generateConditionsWithSourceIPsAndAlias pairs each source IP with chunks of
// aliases, reserving one slot per rule for the source-IP condition.
func (e *errMaxConditionValuesPerRule) generateConditionsWithSourceIPsAndAlias(remaining int) []conditionGroup {
	var groups []conditionGroup
	for _, ip := range e.allowedSourceIps {
		seed := conditionGroup{allowedSourceIps: []string{ip}}
		groups = append(groups, e.generateConditionsGroups(remaining-1, true, seed)...)
	}
	return groups
}
// generateConditionsWithAliasOnly chunks the aliases into rule-sized groups.
func (e *errMaxConditionValuesPerRule) generateConditionsWithAliasOnly(remaining int) []conditionGroup {
	return e.generateConditionsGroups(remaining, true, conditionGroup{})
}
// generateConditionWithSourceIPsOnly chunks the source IPs into rule-sized groups.
func (e *errMaxConditionValuesPerRule) generateConditionWithSourceIPsOnly(remaining int) []conditionGroup {
	return e.generateConditionsGroups(remaining, false, conditionGroup{})
}
// generateConditionsGroups splits either the aliases (isAlias == true) or the
// allowed source IPs into chunks of at most `remaining` values, producing one
// conditionGroup per chunk. The seed group's other field (e.g. a fixed source
// IP) carries over into every produced group.
func (e *errMaxConditionValuesPerRule) generateConditionsGroups(remaining int, isAlias bool, group conditionGroup) []conditionGroup {
	var groups []conditionGroup
	var conditions []string
	if isAlias {
		conditions = e.aliases
	} else {
		conditions = e.allowedSourceIps
	}
	for i := 0; i < len(conditions); i += remaining {
		end := i + remaining
		if end > len(conditions) {
			end = len(conditions)
		}
		// `group` is copied by value on each append, so mutating its slice
		// header here does not affect previously appended groups.
		if isAlias {
			group.aliases = conditions[i:end]
			groups = append(groups, group)
			continue
		}
		group.allowedSourceIps = conditions[i:end]
		groups = append(groups, group)
	}
	return groups
}
| 2,365 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudfront"
)
var (
	// errAZsNotEqual is returned when the public and private subnets do not
	// cover the same set of availability zones.
	errAZsNotEqual = errors.New("public subnets and private subnets do not span the same availability zones")
	// minAZs is the minimum number of availability zones required when AZs
	// are configured for a customized VPC.
	minAZs = 2
)
// Validate returns nil if Environment is configured correctly.
func (e Environment) Validate() error {
	return e.EnvironmentConfig.validate()
}
// validate returns nil if EnvironmentConfig is configured correctly. It
// validates the network, observability, HTTP, security-group, and CDN
// sections, then enforces cross-section constraints: CDN-restricted ingress
// requires the CDN, CDN certificates interplay with public ALB certificates,
// and internal ALB subnet placement requires imported subnets.
func (e EnvironmentConfig) validate() error {
	if err := e.Network.validate(); err != nil {
		return fmt.Errorf(`validate "network": %w`, err)
	}
	if err := e.Observability.validate(); err != nil {
		return fmt.Errorf(`validate "observability": %w`, err)
	}
	if err := e.HTTPConfig.validate(); err != nil {
		return fmt.Errorf(`validate "http config": %w`, err)
	}
	if err := e.Network.VPC.SecurityGroupConfig.validate(); err != nil {
		return fmt.Errorf(`validate "security_group": %w`, err)
	}
	if err := e.CDNConfig.validate(); err != nil {
		return fmt.Errorf(`validate "cdn": %w`, err)
	}
	// Restricting public ALB ingress to CloudFront is meaningless without the CDN.
	if e.IsPublicLBIngressRestrictedToCDN() && !e.CDNEnabled() {
		return errors.New("CDN must be enabled to limit security group ingress to CloudFront")
	}
	if e.CDNEnabled() {
		cdnCert := e.CDNConfig.Config.Certificate
		if e.HTTPConfig.Public.Certificates == nil {
			// Without public ALB certs, a CDN certificate is only usable if
			// CloudFront terminates TLS itself.
			if cdnCert != nil && !aws.BoolValue(e.CDNConfig.Config.TerminateTLS) {
				return errors.New(`"cdn.terminate_tls" must be true if "cdn.certificate" is set without "http.public.certificates"`)
			}
		} else {
			// Public ALB certs together with the CDN require a CDN certificate.
			if cdnCert == nil {
				return &errFieldMustBeSpecified{
					missingField:       "cdn.certificate",
					conditionalFields:  []string{"http.public.certificates", "cdn"},
					allMustBeSpecified: true,
				}
			}
		}
	}
	if e.HTTPConfig.Private.InternalALBSubnets != nil {
		// Placement can only be checked against explicitly imported subnets.
		if !e.Network.VPC.imported() {
			return errors.New("in order to specify internal ALB subnet placement, subnets must be imported")
		}
		if err := e.validateInternalALBSubnets(); err != nil {
			return err
		}
	}
	return nil
}
// validate returns nil if environmentNetworkConfig is configured correctly.
func (n environmentNetworkConfig) validate() error {
	err := n.VPC.validate()
	if err == nil {
		return nil
	}
	return fmt.Errorf(`validate "vpc": %w`, err)
}
// validate returns nil if environmentVPCConfig is configured correctly: a VPC
// may be imported or customized but not both, and its subnets and flow-log
// settings must themselves be valid.
func (cfg environmentVPCConfig) validate() error {
	imported, customized := cfg.imported(), cfg.managedVPCCustomized()
	if imported && customized {
		return errors.New(`cannot import VPC resources (with "id" fields) and customize VPC resources (with "cidr" and "az" fields) at the same time`)
	}
	if err := cfg.Subnets.validate(); err != nil {
		return fmt.Errorf(`validate "subnets": %w`, err)
	}
	// At most one of the following branches can apply.
	switch {
	case imported:
		if err := cfg.validateImportedVPC(); err != nil {
			return fmt.Errorf(`validate "subnets" for an imported VPC: %w`, err)
		}
	case customized:
		if err := cfg.validateManagedVPC(); err != nil {
			return fmt.Errorf(`validate "subnets" for an adjusted VPC: %w`, err)
		}
	}
	if err := cfg.FlowLogs.validate(); err != nil {
		return fmt.Errorf(`validate vpc "flowlogs": %w`, err)
	}
	return nil
}
// validate returns nil if securityGroupRule has all the required parameters
// set: "cidr", "ip_protocol", and valid "ports".
func (cfg securityGroupRule) validate() error {
	switch {
	case cfg.CidrIP == "":
		return &errFieldMustBeSpecified{missingField: "cidr"}
	case cfg.IpProtocol == "":
		return &errFieldMustBeSpecified{missingField: "ip_protocol"}
	default:
		return cfg.Ports.validate()
	}
}
// validate returns nil if the rule's ports are set and, when a port range is
// given, the range is well formed.
func (cfg portsConfig) validate() error {
	if cfg.IsEmpty() {
		return &errFieldMustBeSpecified{
			missingField: "ports",
		}
	}
	if cfg.Range == nil {
		// A single port was given; nothing more to validate here.
		return nil
	}
	if err := cfg.Range.validate(); err != nil {
		var targetErr *errInvalidRange
		if errors.As(err, &targetErr) {
			// Re-wrap with the raw value and the format expected in manifests.
			return &errInvalidRange{
				value:       aws.StringValue((*string)(cfg.Range)),
				validFormat: "${from_port}-${to_port}",
			}
		}
		return err
	}
	return nil
}
// validate returns nil if every ingress and egress rule in
// securityGroupConfig is configured correctly.
func (cfg securityGroupConfig) validate() error {
	for i, rule := range cfg.Ingress {
		if err := rule.validate(); err != nil {
			return fmt.Errorf(`validate ingress[%d]: %w`, i, err)
		}
	}
	for i, rule := range cfg.Egress {
		if err := rule.validate(); err != nil {
			return fmt.Errorf(`validate egress[%d]: %w`, i, err)
		}
	}
	return nil
}
// validateImportedVPC checks imported subnets: every subnet must carry an
// "id", at least one subnet must exist, and importing exactly one public or
// exactly one private subnet is rejected (two are required).
func (cfg environmentVPCConfig) validateImportedVPC() error {
	for idx, subnet := range cfg.Subnets.Public {
		if aws.StringValue(subnet.SubnetID) == "" {
			return fmt.Errorf(`validate public[%d]: %w`, idx, &errFieldMustBeSpecified{
				missingField: "id",
			})
		}
	}
	for idx, subnet := range cfg.Subnets.Private {
		if aws.StringValue(subnet.SubnetID) == "" {
			return fmt.Errorf(`validate private[%d]: %w`, idx, &errFieldMustBeSpecified{
				missingField: "id",
			})
		}
	}
	switch {
	case len(cfg.Subnets.Private)+len(cfg.Subnets.Public) <= 0:
		return errors.New(`VPC must have subnets in order to proceed with environment creation`)
	case len(cfg.Subnets.Public) == 1:
		return errors.New(`validate "public": at least two public subnets must be imported to enable Load Balancing`)
	case len(cfg.Subnets.Private) == 1:
		return errors.New(`validate "private": at least two private subnets must be imported`)
	}
	return nil
}
// validateManagedVPC checks customized (non-imported) subnets: every subnet
// must carry a "cidr", public and private subnets must span the same AZs,
// and when AZs are configured there must be at least minAZs of them with one
// distinct CIDR per AZ on each side.
func (cfg environmentVPCConfig) validateManagedVPC() error {
	var (
		publicAZs    = make(map[string]struct{})
		privateAZs   = make(map[string]struct{})
		publicCIDRs  = make(map[string]struct{})
		privateCIDRs = make(map[string]struct{})
	)
	var exists = struct{}{}
	for idx, subnet := range cfg.Subnets.Public {
		if aws.StringValue((*string)(subnet.CIDR)) == "" {
			return fmt.Errorf(`validate public[%d]: %w`, idx, &errFieldMustBeSpecified{
				missingField: "cidr",
			})
		}
		publicCIDRs[aws.StringValue((*string)(subnet.CIDR))] = exists
		if aws.StringValue(subnet.AZ) != "" {
			publicAZs[aws.StringValue(subnet.AZ)] = exists
		}
	}
	for idx, subnet := range cfg.Subnets.Private {
		if aws.StringValue((*string)(subnet.CIDR)) == "" {
			return fmt.Errorf(`validate private[%d]: %w`, idx, &errFieldMustBeSpecified{
				missingField: "cidr",
			})
		}
		privateCIDRs[aws.StringValue((*string)(subnet.CIDR))] = exists
		if aws.StringValue(subnet.AZ) != "" {
			privateAZs[aws.StringValue(subnet.AZ)] = exists
		}
	}
	// NOTE: the following are constraints on az:
	// 1. #az = 0, or #az = #public_subnets = #private_subnets.
	// 2. set(az_for_public) = set(az_for_private).
	// 3, If configured at all, the number of AZ must be >= 2.
	if !areSetsEqual(publicAZs, privateAZs) {
		return errAZsNotEqual
	}
	numAZs := len(publicAZs)
	if numAZs == 0 {
		// AZs are optional; without them the CIDR-per-AZ checks don't apply.
		return nil
	}
	if numAZs < minAZs {
		return fmt.Errorf(`require at least %d availability zones`, minAZs)
	}
	if len(publicCIDRs) != numAZs {
		return fmt.Errorf(`validate "public": number of public subnet CIDRs (%d) does not match number of AZs (%d)`, len(publicCIDRs), len(publicAZs))
	}
	if len(privateCIDRs) != numAZs {
		return fmt.Errorf(`validate "private": number of private subnet CIDRs (%d) does not match number of AZs (%d)`, len(privateCIDRs), len(publicAZs))
	}
	return nil
}
// validate returns nil if every public and private subnet configuration is
// valid.
func (cs subnetsConfiguration) validate() error {
	validateAll := func(kind string, subnets []subnetConfiguration) error {
		for i, s := range subnets {
			if err := s.validate(); err != nil {
				return fmt.Errorf(`validate "%s[%d]": %w`, kind, i, err)
			}
		}
		return nil
	}
	if err := validateAll("public", cs.Public); err != nil {
		return err
	}
	return validateAll("private", cs.Private)
}
// validate returns nil if subnetConfiguration is configured correctly: an
// imported subnet "id" cannot be combined with "cidr" or "az".
func (c subnetConfiguration) validate() error {
	if c.SubnetID == nil {
		return nil
	}
	if c.CIDR != nil {
		return &errFieldMutualExclusive{
			firstField:  "id",
			secondField: "cidr",
			mustExist:   false,
		}
	}
	if c.AZ != nil {
		return &errFieldMutualExclusive{
			firstField:  "id",
			secondField: "az",
			mustExist:   false,
		}
	}
	return nil
}
// validate is a no-op for VPCFlowLogsArgs: all field combinations are
// currently accepted.
func (fl VPCFlowLogsArgs) validate() error {
	return nil
}
// validate is a no-op for environmentObservability: all field combinations
// are currently accepted.
func (o environmentObservability) validate() error {
	return nil
}
// validate returns nil if both the public and private HTTP configurations
// are configured correctly.
func (cfg EnvironmentHTTPConfig) validate() error {
	sections := []struct {
		field    string
		validate func() error
	}{
		{"public", cfg.Public.validate},
		{"private", cfg.Private.validate},
	}
	for _, section := range sections {
		if err := section.validate(); err != nil {
			return fmt.Errorf(`validate %q: %w`, section.field, err)
		}
	}
	return nil
}
// validate returns nil if PublicHTTPConfig is configured correctly: the
// deprecated "security_groups.ingress" and the new "ingress" fields are
// mutually exclusive, certificates must be valid ARNs, "from_vpc" is
// rejected (a public ALB already allows VPC ingress), and the access-log and
// ingress settings must themselves be valid.
func (cfg PublicHTTPConfig) validate() error {
	if !cfg.DeprecatedSG.DeprecatedIngress.IsEmpty() && !cfg.Ingress.IsEmpty() {
		return &errSpecifiedBothIngressFields{
			firstField:  "public.http.security_groups.ingress",
			secondField: "public.http.ingress",
		}
	}
	for idx, certARN := range cfg.Certificates {
		if _, err := arn.Parse(certARN); err != nil {
			return fmt.Errorf(`parse "certificates[%d]": %w`, idx, err)
		}
	}
	if cfg.DeprecatedSG.DeprecatedIngress.VPCIngress != nil {
		return fmt.Errorf("a public load balancer already allows vpc ingress")
	}
	if err := cfg.ELBAccessLogs.validate(); err != nil {
		return fmt.Errorf(`validate "access_logs": %w`, err)
	}
	if err := cfg.DeprecatedSG.validate(); err != nil {
		return err
	}
	return cfg.Ingress.validate()
}
// validate returns nil if ELBAccessLogsArgsOrBool is configured correctly.
func (al ELBAccessLogsArgsOrBool) validate() error {
	if !al.isEmpty() {
		return al.AdvancedConfig.validate()
	}
	return nil
}
// validate is a no-op for ELBAccessLogsArgs: all field combinations are
// currently accepted.
func (al ELBAccessLogsArgs) validate() error {
	return nil
}
// validate returns nil if DeprecatedALBSecurityGroupsConfig is configured
// correctly; it delegates to the deprecated ingress settings.
func (cfg DeprecatedALBSecurityGroupsConfig) validate() error {
	return cfg.DeprecatedIngress.validate()
}
// validate returns nil if privateHTTPConfig is configured correctly: the
// deprecated "security_groups.ingress" and the new "ingress" fields are
// mutually exclusive, certificates must be valid ARNs, and restrictive
// ingress fields are rejected for internal load balancers.
func (cfg privateHTTPConfig) validate() error {
	if !cfg.DeprecatedSG.DeprecatedIngress.IsEmpty() && !cfg.Ingress.IsEmpty() {
		return &errSpecifiedBothIngressFields{
			firstField:  "private.http.security_groups.ingress",
			secondField: "private.http.ingress",
		}
	}
	for idx, certARN := range cfg.Certificates {
		if _, err := arn.Parse(certARN); err != nil {
			return fmt.Errorf(`parse "certificates[%d]": %w`, idx, err)
		}
	}
	// Restrictive ingress (CDN/source IPs) only applies to public ALBs.
	if !cfg.DeprecatedSG.DeprecatedIngress.RestrictiveIngress.IsEmpty() {
		return fmt.Errorf("an internal load balancer cannot have restrictive ingress fields")
	}
	if err := cfg.DeprecatedSG.validate(); err != nil {
		// Bug fix: the wrapping message was missing the closing quote
		// (previously `validate "security_groups: %w`).
		return fmt.Errorf(`validate "security_groups": %w`, err)
	}
	return cfg.Ingress.validate()
}
// validate returns nil if EnvironmentCDNConfig is configured correctly.
func (cfg EnvironmentCDNConfig) validate() error {
	if !cfg.Config.isEmpty() {
		return cfg.Config.validate()
	}
	return nil
}
// validate returns nil if DeprecatedIngress is configured correctly.
func (i DeprecatedIngress) validate() error {
	if !i.IsEmpty() {
		return i.RestrictiveIngress.validate()
	}
	return nil
}
// validate returns nil if every source IP in RestrictiveIngress is valid.
func (i RestrictiveIngress) validate() error {
	for _, ip := range i.SourceIPs {
		if err := ip.validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate is a no-op for RelaxedIngress: all field combinations are
// currently accepted.
func (i RelaxedIngress) validate() error {
	return nil
}
// validate returns nil if AdvancedCDNConfig is configured correctly: an
// imported certificate must be a parseable ARN in CloudFront's certificate
// region, and the static-asset settings must be valid.
func (cfg AdvancedCDNConfig) validate() error {
	if cfg.Certificate != nil {
		parsed, err := arn.Parse(*cfg.Certificate)
		if err != nil {
			return fmt.Errorf(`parse cdn certificate: %w`, err)
		}
		if parsed.Region != cloudfront.CertRegion {
			return &errInvalidCloudFrontRegion{}
		}
	}
	if err := cfg.Static.validate(); err != nil {
		return fmt.Errorf(`validate "static_assets": %w`, err)
	}
	return nil
}
// validate returns nil if CDNStaticConfig is configured correctly: when any
// field is set, "alias", "location", and "path" are all required.
func (cfg CDNStaticConfig) validate() error {
	if cfg.IsEmpty() {
		return nil
	}
	required := []struct {
		name  string
		value string
	}{
		{"alias", cfg.Alias},
		{"location", cfg.Location},
		{"path", cfg.Path},
	}
	for _, field := range required {
		if field.value == "" {
			return &errFieldMustBeSpecified{missingField: field.name}
		}
	}
	return nil
}
// validateInternalALBSubnets checks that every subnet requested for internal
// ALB placement is one of the imported public or private subnets.
func (c EnvironmentConfig) validateInternalALBSubnets() error {
	importedIDs := make(map[string]struct{})
	for _, subnet := range append(c.Network.VPC.Subnets.Private, c.Network.VPC.Subnets.Public...) {
		importedIDs[aws.StringValue(subnet.SubnetID)] = struct{}{}
	}
	// Track distinct matched placements so duplicates don't inflate the count.
	matched := make(map[string]bool)
	for _, placement := range c.HTTPConfig.Private.InternalALBSubnets {
		if _, ok := importedIDs[placement]; ok {
			matched[placement] = true
		}
	}
	if len(matched) != len(c.HTTPConfig.Private.InternalALBSubnets) {
		return fmt.Errorf("subnet(s) specified for internal ALB placement not imported")
	}
	return nil
}
// areSetsEqual reports whether two sets, represented as maps with
// empty-struct values, contain exactly the same keys.
func areSetsEqual[T comparable](a map[T]struct{}, b map[T]struct{}) bool {
	if len(a) != len(b) {
		return false
	}
	for key := range b {
		if _, found := a[key]; !found {
			return false
		}
	}
	return true
}
| 458 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
)
// TestEnvironment_Validate covers top-level environment manifest validation:
// a malformed network section must surface a wrapped error, and an empty
// manifest must validate cleanly.
func TestEnvironment_Validate(t *testing.T) {
	mockVPCCIDR := IPNet("10.0.0.0/16")
	testCases := map[string]struct {
		in                   Environment
		wantedErrorMsgPrefix string
	}{
		"malformed network": {
			in: Environment{
				EnvironmentConfig: EnvironmentConfig{
					Network: environmentNetworkConfig{
						VPC: environmentVPCConfig{
							// Importing an ID and customizing a CIDR at once is invalid.
							ID:   stringP("vpc-123"),
							CIDR: &mockVPCCIDR,
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "network": `,
		},
		"succeed on empty config": {},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.Validate()
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestEnvironmentConfig_validate covers cross-section validation of the
// environment manifest: internal ALB subnet placement, security-group rules,
// VPC flow logs, ELB access logs, CDN/certificate interplay, and the mutual
// exclusion of the deprecated and new ingress fields.
func TestEnvironmentConfig_validate(t *testing.T) {
	mockPublicSubnet1CIDR := IPNet("10.0.0.0/24")
	mockPublicSubnet2CIDR := IPNet("10.0.1.0/24")
	mockPrivateSubnet1CIDR := IPNet("10.0.3.0/24")
	mockPrivateSubnet2CIDR := IPNet("10.0.4.0/24")
	testCases := map[string]struct {
		in          EnvironmentConfig
		wantedError string
	}{
		"error if internal ALB subnet placement specified with adjusted vpc": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						CIDR: ipNetP("apple cider"),
						Subnets: subnetsConfiguration{
							Public: []subnetConfiguration{
								{
									CIDR: &mockPublicSubnet1CIDR,
									AZ:   aws.String("us-east-2a"),
								},
								{
									CIDR: &mockPublicSubnet2CIDR,
									AZ:   aws.String("us-east-1b"),
								},
							},
							Private: []subnetConfiguration{
								{
									CIDR: &mockPrivateSubnet1CIDR,
									AZ:   aws.String("us-east-2a"),
								},
								{
									CIDR: &mockPrivateSubnet2CIDR,
									AZ:   aws.String("us-east-1b"),
								},
							},
						},
					},
				},
				HTTPConfig: EnvironmentHTTPConfig{
					Private: privateHTTPConfig{
						InternalALBSubnets: []string{"mockSubnet"},
					},
				},
			},
			wantedError: "in order to specify internal ALB subnet placement, subnets must be imported",
		},
		"error if invalid security group config": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						SecurityGroupConfig: securityGroupConfig{
							Ingress: []securityGroupRule{
								{
									IpProtocol: "tcp",
									Ports: portsConfig{
										Port: aws.Int(80),
									},
								},
							},
						},
					},
				},
			},
			wantedError: "validate \"security_group\": validate ingress[0]: \"cidr\" must be specified",
		},
		"valid security group config": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						SecurityGroupConfig: securityGroupConfig{
							Ingress: []securityGroupRule{
								{
									CidrIP:     "0.0.0.0",
									IpProtocol: "tcp",
									Ports: portsConfig{
										Range: (*IntRangeBand)(aws.String("1-10")),
									},
								},
							},
						},
					},
				},
			},
		},
		"invalid ports value in security group config": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						SecurityGroupConfig: securityGroupConfig{
							Ingress: []securityGroupRule{
								{
									CidrIP:     "0.0.0.0",
									IpProtocol: "tcp",
									Ports: portsConfig{
										Range: (*IntRangeBand)(aws.String("1-10-10")),
									},
								},
							},
						},
					},
				},
			},
			wantedError: "validate \"security_group\": validate ingress[0]: invalid range value 1-10-10: valid format is ${from_port}-${to_port}",
		},
		"valid security group config without ports": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						SecurityGroupConfig: securityGroupConfig{
							Ingress: []securityGroupRule{
								{
									CidrIP:     "0.0.0.0",
									IpProtocol: "tcp",
								},
							},
						},
					},
				},
			},
			wantedError: "validate \"security_group\": validate ingress[0]: \"ports\" must be specified",
		},
		"error if security group ingress is limited to a cdn distribution not enabled": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Enabled: aws.Bool(false),
				},
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
							DeprecatedIngress: DeprecatedIngress{
								RestrictiveIngress: RestrictiveIngress{
									CDNIngress: aws.Bool(true),
								},
							},
						},
					},
				},
			},
			wantedError: "CDN must be enabled to limit security group ingress to CloudFront",
		},
		"valid vpc flowlogs with default retention": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						FlowLogs: Union[*bool, VPCFlowLogsArgs]{
							Basic: aws.Bool(true),
						},
					},
				},
			},
		},
		"valid vpc flowlogs with a specified retention": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						FlowLogs: Union[*bool, VPCFlowLogsArgs]{
							Advanced: VPCFlowLogsArgs{
								Retention: aws.Int(30),
							},
						},
					},
				},
			},
		},
		"valid elb access logs config with bucket_prefix": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						ELBAccessLogs: ELBAccessLogsArgsOrBool{
							AdvancedConfig: ELBAccessLogsArgs{
								Prefix: aws.String("prefix"),
							},
						},
					},
				},
			},
		},
		"valid elb access logs config with both bucket_prefix and bucket_name": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						ELBAccessLogs: ELBAccessLogsArgsOrBool{
							AdvancedConfig: ELBAccessLogsArgs{
								Prefix:     aws.String("prefix"),
								BucketName: aws.String("bucketName"),
							},
						},
					},
				},
			},
		},
		"error if cdn cert specified, cdn not terminating tls, and public certs not specified": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Config: AdvancedCDNConfig{
						Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
					},
				},
			},
			wantedError: `"cdn.terminate_tls" must be true if "cdn.certificate" is set without "http.public.certificates"`,
		},
		"success if cdn cert specified, cdn terminating tls, and no public certs": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Config: AdvancedCDNConfig{
						Certificate:  aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
						TerminateTLS: aws.Bool(true),
					},
				},
			},
		},
		"error if cdn cert not specified but public certs imported": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Enabled: aws.Bool(true),
				},
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						Certificates: []string{"arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"},
					},
				},
			},
			wantedError: "\"cdn.certificate\" must be specified if \"http.public.certificates\" and \"cdn\" are specified",
		},
		"error if subnets specified for internal ALB placement don't exist": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						ID: aws.String("mockID"),
						Subnets: subnetsConfiguration{
							Private: []subnetConfiguration{
								{SubnetID: aws.String("existentSubnet")},
								{SubnetID: aws.String("anotherExistentSubnet")},
							},
						},
					},
				},
				HTTPConfig: EnvironmentHTTPConfig{
					Private: privateHTTPConfig{
						InternalALBSubnets: []string{"nonexistentSubnet"},
					},
				},
			},
			wantedError: "subnet(s) specified for internal ALB placement not imported",
		},
		"valid case with internal ALB placement": {
			in: EnvironmentConfig{
				Network: environmentNetworkConfig{
					VPC: environmentVPCConfig{
						ID: aws.String("mockID"),
						Subnets: subnetsConfiguration{
							Private: []subnetConfiguration{
								{SubnetID: aws.String("existentSubnet")},
								{SubnetID: aws.String("anotherExistentSubnet")},
							},
							Public: []subnetConfiguration{
								{SubnetID: aws.String("publicSubnet1")},
								{SubnetID: aws.String("publicSubnet2")},
							},
						},
					},
				},
				HTTPConfig: EnvironmentHTTPConfig{
					Private: privateHTTPConfig{
						InternalALBSubnets: []string{"existentSubnet", "anotherExistentSubnet"},
					},
				},
			},
		},
		"returns error when http private config with deprecated and a new ingress field": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Private: privateHTTPConfig{
						Ingress: RelaxedIngress{
							VPCIngress: aws.Bool(true),
						},
						DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
							DeprecatedIngress: DeprecatedIngress{
								VPCIngress: aws.Bool(true),
							},
						},
					},
				},
			},
			wantedError: "validate \"http config\": validate \"private\": must specify one, not both, of \"private.http.security_groups.ingress\" and \"private.http.ingress\"",
		},
		"no error when http private config with a new ingress field": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Private: privateHTTPConfig{
						Ingress: RelaxedIngress{
							VPCIngress: aws.Bool(true),
						},
					},
				},
			},
		},
		"returns error when http public config with deprecated and a new ingress field": {
			in: EnvironmentConfig{
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						Ingress: RestrictiveIngress{
							CDNIngress: aws.Bool(true),
						},
						DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
							DeprecatedIngress: DeprecatedIngress{
								RestrictiveIngress: RestrictiveIngress{
									CDNIngress: aws.Bool(true),
								},
							},
						},
					},
				},
			},
			wantedError: "validate \"http config\": validate \"public\": must specify one, not both, of \"public.http.security_groups.ingress\" and \"public.http.ingress\"",
		},
		"no error when http public config with a new ingress field": {
			in: EnvironmentConfig{
				CDNConfig: EnvironmentCDNConfig{
					Enabled: aws.Bool(true),
				},
				HTTPConfig: EnvironmentHTTPConfig{
					Public: PublicHTTPConfig{
						Ingress: RestrictiveIngress{
							CDNIngress: aws.Bool(true),
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedError != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedError)
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestEnvironmentNetworkConfig_validate checks that VPC validation errors are
// wrapped with the "vpc" field name and that an empty network config passes.
func TestEnvironmentNetworkConfig_validate(t *testing.T) {
	mockVPCCIDR := IPNet("10.0.0.0/16")
	testCases := map[string]struct {
		in                   environmentNetworkConfig
		wantedErrorMsgPrefix string
	}{
		"malformed vpc": {
			in: environmentNetworkConfig{
				VPC: environmentVPCConfig{
					// Importing an ID and customizing a CIDR at once is invalid.
					ID:   stringP("vpc-123"),
					CIDR: &mockVPCCIDR,
				},
			},
			wantedErrorMsgPrefix: `validate "vpc": `,
		},
		"succeed on empty config": {},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestEnvironmentVPCConfig_validate covers validation of the environment VPC
// configuration: importing an existing VPC (by "id" fields) and adjusting a
// Copilot-managed VPC (by "cidr"/"az" fields) are mutually exclusive, and
// each mode carries its own subnet requirements.
func TestEnvironmentVPCConfig_validate(t *testing.T) {
	var (
		mockVPCCIDR            = IPNet("10.0.0.0/16")
		mockPublicSubnet1CIDR  = IPNet("10.0.0.0/24")
		mockPublicSubnet2CIDR  = IPNet("10.0.1.0/24")
		mockPublicSubnet3CIDR  = IPNet("10.0.2.0/24")
		mockPrivateSubnet1CIDR = IPNet("10.0.3.0/24")
		mockPrivateSubnet2CIDR = IPNet("10.0.4.0/24")
		mockPrivateSubnet3CIDR = IPNet("10.0.5.0/24")
	)
	testCases := map[string]struct {
		in                   environmentVPCConfig
		wantedErrorMsgPrefix string
		wantedErr            error
	}{
		"malformed subnets": {
			in: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-public-subnet-1"),
							CIDR:     &mockPublicSubnet1CIDR,
						},
						{
							SubnetID: aws.String("mock-public-subnet-2"),
							CIDR:     &mockPublicSubnet1CIDR,
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "subnets": `,
		},
		"error if vpc is both imported and configured": {
			in: environmentVPCConfig{
				ID:   aws.String("vpc-1234"),
				CIDR: &mockVPCCIDR,
			},
			wantedErr: errors.New(`cannot import VPC resources (with "id" fields) and customize VPC resources (with "cidr" and "az" fields) at the same time`),
		},
		"error if importing vpc while subnets are configured": {
			in: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an imported VPC: validate public[0]: "id" must be specified`),
		},
		"error if importing vpc while no subnet is imported": {
			in: environmentVPCConfig{
				ID:      aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{},
			},
			wantedErr: errors.New(`validate "subnets" for an imported VPC: VPC must have subnets in order to proceed with environment creation`),
		},
		"error if importing vpc while only one private subnet is imported": {
			in: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-public-subnet-1"),
						},
					},
					Private: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-private-subnet-1"),
						},
						{
							SubnetID: aws.String("mock-private-subnet-2"),
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an imported VPC: validate "public": at least two public subnets must be imported to enable Load Balancing`),
		},
		"error if importing vpc while only one public subnet is imported": {
			in: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-public-subnet-1"),
						},
						{
							SubnetID: aws.String("mock-public-subnet-2"),
						},
					},
					Private: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-private-subnet-1"),
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an imported VPC: validate "private": at least two private subnets must be imported`),
		},
		"error if configuring vpc without enough azs": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2a"),
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPrivateSubnet2CIDR,
							AZ:   aws.String("us-east-2a"),
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an adjusted VPC: require at least 2 availability zones`),
		},
		"error if configuring vpc while some subnets are imported": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							SubnetID: aws.String("mock-public-subnet-1"),
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an adjusted VPC: validate public[1]: "cidr" must be specified`),
		},
		"error if configuring vpc while azs do not match between private and public subnets": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPrivateSubnet2CIDR,
							AZ:   aws.String("us-east-2c"),
						},
					},
				},
			},
			wantedErr: errors.New("validate \"subnets\" for an adjusted VPC: public subnets and private subnets do not span the same availability zones"),
		},
		"error if configuring vpc while the number of public subnet CIDR does not match the number of azs": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
						{
							CIDR: &mockPublicSubnet3CIDR,
							AZ:   aws.String("us-east-2b"),
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPrivateSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an adjusted VPC: validate "public": number of public subnet CIDRs (3) does not match number of AZs (2)`),
		},
		"error if configuring vpc while the number of private subnet CIDR does not match the number of azs": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPrivateSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
						{
							CIDR: &mockPrivateSubnet3CIDR,
							AZ:   aws.String("us-east-2b"),
						},
					},
				},
			},
			wantedErr: errors.New(`validate "subnets" for an adjusted VPC: validate "private": number of private subnet CIDRs (3) does not match number of AZs (2)`),
		},
		"succeed on imported vpc": {
			in: environmentVPCConfig{
				ID: aws.String("vpc-1234"),
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-public-subnet-1"),
						},
						{
							SubnetID: aws.String("mock-public-subnet-2"),
						},
					},
					Private: []subnetConfiguration{
						{
							SubnetID: aws.String("mock-private-subnet-1"),
						},
						{
							SubnetID: aws.String("mock-private-subnet-2"),
						},
					},
				},
			},
		},
		"succeed on managed vpc that is fully adjusted ": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
						{
							CIDR: &mockPublicSubnet3CIDR,
							AZ:   aws.String("us-east-2c"),
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet1CIDR,
							AZ:   aws.String("us-east-2a"),
						},
						{
							CIDR: &mockPrivateSubnet2CIDR,
							AZ:   aws.String("us-east-2b"),
						},
						{
							CIDR: &mockPrivateSubnet3CIDR,
							AZ:   aws.String("us-east-2c"),
						},
					},
				},
			},
		},
		"succeed on managed vpc that does not adjust az": {
			in: environmentVPCConfig{
				CIDR: &mockVPCCIDR,
				Subnets: subnetsConfiguration{
					Public: []subnetConfiguration{
						{
							CIDR: &mockPublicSubnet1CIDR,
						},
						{
							CIDR: &mockPublicSubnet2CIDR,
						},
						{
							CIDR: &mockPublicSubnet3CIDR,
						},
					},
					Private: []subnetConfiguration{
						{
							CIDR: &mockPrivateSubnet1CIDR,
						},
						{
							CIDR: &mockPrivateSubnet2CIDR,
						},
						{
							CIDR: &mockPrivateSubnet3CIDR,
						},
					},
				},
			},
		},
		"succeed on empty config": {},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedErr == nil && tc.wantedErrorMsgPrefix == "" {
				require.NoError(t, gotErr)
			}
			if tc.wantedErr != nil {
				// Pass the actual error first: testify's EqualError signature is
				// (t, err, expectedMsg). With the arguments swapped, a nil gotErr
				// would panic on gotErr.Error() instead of cleanly failing the
				// test when validation unexpectedly succeeds.
				require.EqualError(t, gotErr, tc.wantedErr.Error())
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
			}
		})
	}
}
// TestSubnetsConfiguration_validate checks that malformed public and private
// subnet entries are reported with the offending subnet's index, and that a
// purely-imported subnet configuration passes.
func TestSubnetsConfiguration_validate(t *testing.T) {
	var (
		mockPublicSubnet1CIDR  = IPNet("10.0.0.0/24")
		mockPrivateSubnet1CIDR = IPNet("10.0.3.0/24")
	)
	testCases := map[string]struct {
		in                   subnetsConfiguration
		wantedErrorMsgPrefix string
	}{
		"malformed public subnets": {
			in: subnetsConfiguration{
				Public: []subnetConfiguration{
					{
						CIDR:     &mockPublicSubnet1CIDR,
						SubnetID: aws.String("mock-public-subnet-1"),
					},
				},
			},
			wantedErrorMsgPrefix: `validate "public[0]": `,
		},
		"malformed private subnets": {
			in: subnetsConfiguration{
				Private: []subnetConfiguration{
					{
						CIDR:     &mockPrivateSubnet1CIDR,
						SubnetID: aws.String("mock-private-subnet-1"),
					},
				},
			},
			wantedErrorMsgPrefix: `validate "private[0]": `,
		},
		"success": {
			in: subnetsConfiguration{
				Public: []subnetConfiguration{
					{
						SubnetID: aws.String("mock-public-subnet-1"),
					},
				},
				Private: []subnetConfiguration{
					{
						SubnetID: aws.String("mock-private-subnet-1"),
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wantedErrorMsgPrefix == "" {
				require.NoError(t, err)
				return
			}
			require.Error(t, err)
			require.Contains(t, err.Error(), tc.wantedErrorMsgPrefix)
		})
	}
}
// TestCDNConfiguration_validate covers validation of the environment CDN
// section: certificates must parse and live in us-east-1, and the
// static-assets sub-config must be complete when partially specified.
func TestCDNConfiguration_validate(t *testing.T) {
	testCases := map[string]struct {
		in                   EnvironmentCDNConfig
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"valid if empty": {
			in: EnvironmentCDNConfig{},
		},
		"valid if bool specified": {
			in: EnvironmentCDNConfig{
				Enabled: aws.Bool(false),
			},
		},
		"success with cert without tls termination": {
			in: EnvironmentCDNConfig{
				Config: AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
				},
			},
		},
		"error if certificate invalid": {
			in: EnvironmentCDNConfig{
				Config: AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:weird-little-arn"),
				},
			},
			wantedErrorMsgPrefix: "parse cdn certificate:",
		},
		"error if certificate in invalid region": {
			in: EnvironmentCDNConfig{
				Config: AdvancedCDNConfig{
					Certificate: aws.String("arn:aws:acm:us-west-2:1111111:certificate/look-like-a-good-arn"),
				},
			},
			wantedError: errors.New("cdn certificate must be in region us-east-1"),
		},
		"error if static config invalid": {
			in: EnvironmentCDNConfig{
				Config: AdvancedCDNConfig{
					Static: CDNStaticConfig{
						Path: "something",
					},
				},
			},
			wantedErrorMsgPrefix: `validate "static_assets"`,
		},
		"success with cert and terminate tls": {
			in: EnvironmentCDNConfig{
				Config: AdvancedCDNConfig{
					Certificate:  aws.String("arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"),
					TerminateTLS: aws.Bool(true),
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
			} else if tc.wantedError != nil {
				require.Error(t, gotErr)
				// Actual error first, expected message second — testify's
				// EqualError signature is (t, err, expectedMsg), so failure
				// output labels actual vs. expected correctly.
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestCDNStaticConfig_validate checks that the CDN static-assets config
// requires "alias", "location", and "path" together once any of them is set,
// while an entirely empty config remains valid.
func TestCDNStaticConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		in          CDNStaticConfig
		wantedError error
	}{
		"valid if empty": {
			in: CDNStaticConfig{},
		},
		"invalid if alias is not specified": {
			in: CDNStaticConfig{
				Path: "something",
			},
			wantedError: fmt.Errorf(`"alias" must be specified`),
		},
		"invalid if location is not specified": {
			in: CDNStaticConfig{
				Alias: "example.com",
			},
			wantedError: fmt.Errorf(`"location" must be specified`),
		},
		"invalid if path is not specified": {
			in: CDNStaticConfig{
				Alias:    "example.com",
				Location: "s3url",
			},
			wantedError: fmt.Errorf(`"path" must be specified`),
		},
		"success": {
			in: CDNStaticConfig{
				Alias:    "example.com",
				Location: "static",
				Path:     "something",
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedError != nil {
				require.Error(t, gotErr)
				// Actual error first, expected message second — testify's
				// EqualError signature is (t, err, expectedMsg).
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestSubnetConfiguration_validate checks mutual exclusivity between an
// imported subnet "id" and the adjustable "cidr"/"az" fields of a single
// subnet entry.
func TestSubnetConfiguration_validate(t *testing.T) {
	mockCIDR := IPNet("10.0.0.0/24")
	testCases := map[string]struct {
		in          subnetConfiguration
		wantedError error
	}{
		"error if id and cidr are both specified": {
			in: subnetConfiguration{
				SubnetID: aws.String("mock-subnet-1"),
				CIDR:     &mockCIDR,
			},
			wantedError: &errFieldMutualExclusive{
				firstField:  "id",
				secondField: "cidr",
				mustExist:   false,
			},
		},
		"error if id and az are both specified": {
			in: subnetConfiguration{
				SubnetID: aws.String("mock-subnet-1"),
				AZ:       aws.String("us-east-2a"),
			},
			wantedError: &errFieldMutualExclusive{
				firstField:  "id",
				secondField: "az",
				mustExist:   false,
			},
		},
		"succeed with id": {
			in: subnetConfiguration{
				SubnetID: aws.String("mock-subnet-1"),
			},
		},
		"succeed with cidr": {
			in: subnetConfiguration{
				CIDR: &mockCIDR,
			},
		},
		"succeed with az": {
			in: subnetConfiguration{
				AZ: aws.String("us-east-2a"),
			},
		},
		"succeed with both cidr and az": {
			in: subnetConfiguration{
				AZ:   aws.String("us-east-2a"),
				CIDR: &mockCIDR,
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedError != nil {
				require.Error(t, gotErr)
				// Actual error first, expected message second — testify's
				// EqualError signature is (t, err, expectedMsg).
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestEnvironmentHTTPConfig_validate covers validation of public and private
// load balancer configuration at the environment level: certificate ARNs must
// parse, and the deprecated security-group ingress fields must be consistent
// with the load balancer's visibility.
func TestEnvironmentHTTPConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		in                   EnvironmentHTTPConfig
		wantedErrorMsgPrefix string
		wantedError          error
	}{
		"malformed public certificate": {
			in: EnvironmentHTTPConfig{
				Public: PublicHTTPConfig{
					Certificates: []string{"arn:aws:weird-little-arn"},
				},
			},
			wantedErrorMsgPrefix: `parse "certificates[0]": `,
		},
		"malformed private certificate": {
			in: EnvironmentHTTPConfig{
				Private: privateHTTPConfig{
					Certificates: []string{"arn:aws:weird-little-arn"},
				},
			},
			wantedErrorMsgPrefix: `parse "certificates[0]": `,
		},
		"success with public cert": {
			in: EnvironmentHTTPConfig{
				Public: PublicHTTPConfig{
					Certificates: []string{"arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"},
				},
			},
		},
		"success with private cert": {
			in: EnvironmentHTTPConfig{
				Private: privateHTTPConfig{
					Certificates: []string{"arn:aws:acm:us-east-1:1111111:certificate/look-like-a-good-arn"},
				},
			},
		},
		"public http config with invalid security group ingress": {
			in: EnvironmentHTTPConfig{
				Public: PublicHTTPConfig{
					DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
						DeprecatedIngress: DeprecatedIngress{
							VPCIngress: aws.Bool(true),
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate "public": a public load balancer already allows vpc ingress`),
		},
		"private http config with invalid security group ingress": {
			in: EnvironmentHTTPConfig{
				Private: privateHTTPConfig{
					DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
						DeprecatedIngress: DeprecatedIngress{
							RestrictiveIngress: RestrictiveIngress{
								CDNIngress: aws.Bool(true),
							},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate "private": an internal load balancer cannot have restrictive ingress fields`),
		},
		"public http config with invalid source ips": {
			in: EnvironmentHTTPConfig{
				Public: PublicHTTPConfig{
					DeprecatedSG: DeprecatedALBSecurityGroupsConfig{
						DeprecatedIngress: DeprecatedIngress{
							RestrictiveIngress: RestrictiveIngress{SourceIPs: []IPNet{"1.1.1.invalidip"}},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate "public": parse IPNet 1.1.1.invalidip: invalid CIDR address: 1.1.1.invalidip`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.in.validate()
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
			} else if tc.wantedError != nil {
				require.Error(t, gotErr)
				// Actual error first, expected message second — testify's
				// EqualError signature is (t, err, expectedMsg).
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
| 1,101 |
copilot-cli | aws | Go | //go:build integration || localintegration
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// basicTypesString returns the string names of every type exempt from the
// validate() audit: all basic reflect kinds, plus time.Duration and yaml.Node.
func basicTypesString() []string {
	names := make([]string, 0, len(basicKinds)+2)
	for _, kind := range basicKinds {
		names = append(names, kind.String())
	}
	return append(names,
		reflect.TypeOf(time.Duration(0)).String(),
		reflect.TypeOf(yaml.Node{}).String(),
	)
}
// validator is implemented by every manifest type that can validate its own
// fields; the audit below checks each exported manifest type satisfies it.
type validator interface {
	validate() error
}
// Test_ValidateAudit ensures that every manifest struct implements "Validate()" method.
func Test_ValidateAudit(t *testing.T) {
	// Audit every workload manifest type.
	workloads := map[string]interface{}{
		"backend service":            &BackendService{},
		"load balanced web service":  &LoadBalancedWebService{},
		"request-driven web service": &RequestDrivenWebService{},
		"schedule job":               &ScheduledJob{},
		"worker service":             &WorkerService{},
	}
	for name, mft := range workloads {
		t.Run(name, func(t *testing.T) {
			require.NoError(t, isValid(reflect.TypeOf(mft)))
		})
	}
	// Audit the environment manifest.
	t.Run("environment manifest", func(t *testing.T) {
		var env Environment
		require.NoError(t, isValid(reflect.TypeOf(env.EnvironmentConfig)))
	})
}
// isValid recursively verifies that typ — and every exported field reachable
// from it — implements the unexported validator interface. Basic kinds,
// time.Duration, yaml.Node, and interfaces are exempt; slices, arrays, and
// maps are checked through their element type.
func isValid(typ reflect.Type) error {
	if typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	// Exempt types never need to implement validate().
	for _, exempt := range basicTypesString() {
		if typ.String() == exempt {
			return nil
		}
	}
	switch typ.Kind() {
	case reflect.Interface:
		return nil
	case reflect.Array, reflect.Slice, reflect.Map:
		// Containers are validated through their member type.
		return isValid(typ.Elem())
	}
	var v validator
	if !typ.Implements(reflect.TypeOf(&v).Elem()) {
		return fmt.Errorf(`%v does not implement "validate()"`, typ)
	}
	// Non-struct implementers have no members left to audit.
	if typ.Kind() != reflect.Struct {
		return nil
	}
	// Structs additionally need every exported field to be valid.
	for i := 0; i < typ.NumField(); i++ {
		field := typ.Field(i)
		if !field.IsExported() {
			continue
		}
		if err := isValid(field.Type); err != nil {
			return err
		}
	}
	return nil
}
| 111 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/stretchr/testify/require"
)
// TestLoadBalancedWebService_validate asserts that manifest validation for a
// Load Balanced Web Service rejects invalid field combinations (image,
// grace_period placement, routing rules, sidecars, networking, scaling, and
// deployment settings) with the expected error message or message prefix.
func TestLoadBalancedWebService_validate(t *testing.T) {
	// A minimal, valid image section reused by most test cases below.
	testImageConfig := ImageWithPortAndHealthcheck{
		ImageWithPort: ImageWithPort{
			Image: Image{
				ImageLocationOrBuild: ImageLocationOrBuild{
					Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
				},
			},
			Port: uint16P(80),
		},
	}
	testCases := map[string]struct {
		lbConfig LoadBalancedWebService

		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"error if fail to validate image": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: ImageWithPortAndHealthcheck{
						ImageWithPort: ImageWithPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build:    BuildArgsOrString{BuildString: aws.String("mockBuild")},
									Location: aws.String("mockLocation"),
								},
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "image": `,
		},
		"error if fail to validate grace_period when specified in the additional listener rules of ALB": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
								HealthCheck: HealthCheckArgsOrString{
									Union: AdvancedToUnion[string](HTTPHealthCheckArgs{
										Path:               aws.String("/testing"),
										HealthyThreshold:   aws.Int64(5),
										UnhealthyThreshold: aws.Int64(6),
										Interval:           durationp(78 * time.Second),
										Timeout:            durationp(9 * time.Second),
									}),
								},
							},
							AdditionalRoutingRules: []RoutingRule{
								{
									Path: stringP("/"),
									HealthCheck: HealthCheckArgsOrString{
										Union: AdvancedToUnion[string](HTTPHealthCheckArgs{
											Path:               aws.String("/testing"),
											HealthyThreshold:   aws.Int64(5),
											UnhealthyThreshold: aws.Int64(6),
											Interval:           durationp(78 * time.Second),
											Timeout:            durationp(9 * time.Second),
											// grace_period is only allowed on the main routing rule.
											GracePeriod: durationp(9 * time.Second),
										}),
									},
								},
							},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate "grace_period": %w`, &errGracePeriodSpecifiedInAdditionalRule{0}),
		},
		"error if fail to validate grace_period when specified the additional listener of NLB": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						Enabled: aws.Bool(false),
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:        stringP("80"),
							HealthCheck: NLBHealthCheckArgs{GracePeriod: durationp(9 * time.Second)},
						},
						AdditionalListeners: []NetworkLoadBalancerListener{
							{
								Port: stringP("80"),
								// grace_period is only allowed on the main listener.
								HealthCheck: NLBHealthCheckArgs{GracePeriod: durationp(9 * time.Second)},
							},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate "grace_period": %w`, &errGracePeriodSpecifiedInAdditionalListener{0}),
		},
		"error if fail to validate grace_period when specified in ALB and NLB at the same time": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
								HealthCheck: HealthCheckArgsOrString{
									Union: AdvancedToUnion[string](HTTPHealthCheckArgs{
										Path:               aws.String("/testing"),
										HealthyThreshold:   aws.Int64(5),
										UnhealthyThreshold: aws.Int64(6),
										Interval:           durationp(78 * time.Second),
										Timeout:            durationp(9 * time.Second),
										GracePeriod:        durationp(9 * time.Second),
									}),
								},
							},
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:        stringP("80"),
							HealthCheck: NLBHealthCheckArgs{GracePeriod: durationp(9 * time.Second)},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate "grace_period": %w`, &errGracePeriodsInBothALBAndNLB{errFieldMutualExclusive{firstField: "http.healthcheck.grace_period", secondField: "nlb.healthcheck.grace_period"}}),
		},
		"error if fail to validate http": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								TargetContainer: aws.String("mockTargetContainer"),
							},
							TargetContainerCamelCase: aws.String("mockTargetContainer"),
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "http": `,
		},
		"error if fail to validate sidecars": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: DependsOn{
								"foo": "bar",
							},
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "sidecars[foo]": `,
		},
		"error if fail to validate network": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: (*PlacementString)(aws.String("")),
							},
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "network": `,
		},
		"error if fail to validate publish config": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{},
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "publish": `,
		},
		"error if fail to validate taskdef override": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					TaskDefOverrides: []OverrideRule{
						{
							Path: "Family",
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "taskdef_overrides[0]": `,
		},
		"error if name is not set": {
			lbConfig: LoadBalancedWebService{
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`"name" must be specified`),
		},
		"error if fail to validate HTTP load balancer target": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{Name: aws.String("mockName")},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:            stringP("/"),
								TargetContainer: aws.String("foo"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate load balancer target for "http":`,
		},
		"error if fail to validate network load balancer target": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{Name: aws.String("mockName")},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:            stringP("/"),
								TargetContainer: aws.String("mockName"),
							},
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:            aws.String("443"),
							TargetContainer: aws.String("foo"),
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate target for "nlb": `,
		},
		"error if fail to validate network load balancer target for additional listener": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{Name: aws.String("mockName")},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path:            stringP("/"),
								TargetContainer: aws.String("mockName"),
							},
						},
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port:            aws.String("443"),
							TargetContainer: aws.String("mockName"),
						},
						AdditionalListeners: []NetworkLoadBalancerListener{
							{
								Port:            aws.String("444"),
								TargetContainer: aws.String("foo"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate target for "nlb.additional_listeners[0]": `,
		},
		"error if fail to validate dependencies": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{Name: aws.String("mockName")},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					// Circular dependency between the two sidecars below.
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: map[string]string{"bar": "healthy"},
							Essential: aws.Bool(false),
						},
						"bar": {
							DependsOn: map[string]string{"foo": "healthy"},
							Essential: aws.Bool(false),
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate container dependencies: `,
		},
		"error if fail to validate windows": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{Name: aws.String("mockName")},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						// EFS volumes are not supported on Windows platforms.
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/amd64"))},
						Storage: Storage{Volumes: map[string]*Volume{
							"foo": {
								EFS: EFSConfigOrBool{
									Enabled: aws.Bool(true),
								},
								MountPointOpts: MountPointOpts{
									ContainerPath: aws.String("mockPath"),
								},
							},
						},
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate Windows: `,
		},
		"error if fail to validate ARM": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						// Spot capacity is not supported on ARM.
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux/arm64"))},
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot:         aws.Int(123),
								workloadType: manifestinfo.LoadBalancedWebServiceType,
							},
						},
					},
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate ARM: `,
		},
		"error if neither of http or nlb is enabled": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						Enabled: aws.Bool(false),
					},
				},
			},
			wantedError: errors.New(`must specify at least one of "http" or "nlb"`),
		},
		"error if scaling based on nlb requests": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Requests: ScalingConfigOrT[int]{
									Value: aws.Int(3),
								},
							},
						},
					},
					HTTPOrBool: HTTPOrBool{
						Enabled: aws.Bool(false),
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("80"),
						},
					},
				},
			},
			wantedError: errors.New(`scaling based on "nlb" requests or response time is not supported`),
		},
		"error if scaling based on nlb response time": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								ResponseTime: ScalingConfigOrT[time.Duration]{
									Value: durationp(10 * time.Second),
								},
							},
						},
					},
					HTTPOrBool: HTTPOrBool{
						Enabled: aws.Bool(false),
					},
					NLBConfig: NetworkLoadBalancerConfiguration{
						Listener: NetworkLoadBalancerListener{
							Port: aws.String("80"),
						},
					},
				},
			},
			wantedError: errors.New(`scaling based on "nlb" requests or response time is not supported`),
		},
		"error if fail to validate deployment": {
			lbConfig: LoadBalancedWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{
					ImageConfig: testImageConfig,
					HTTPOrBool: HTTPOrBool{
						HTTP: HTTP{
							Main: RoutingRule{
								Path: stringP("/"),
							},
						},
					},
					DeployConfig: DeploymentConfig{
						DeploymentControllerConfig: DeploymentControllerConfig{
							Rolling: aws.String("mockName"),
						}},
				},
			},
			wantedErrorMsgPrefix: `validate "deployment"`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.lbConfig.validate()

			// Cases with wantedError assert the exact message; prefix cases
			// only check the beginning of the message.
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestBackendService_validate ensures BackendService.validate surfaces the
// expected error for each misconfigured section of a Backend Service manifest
// (image, sidecars, network, publish, taskdef overrides, platform, scaling,
// http routing, and service connect).
func TestBackendService_validate(t *testing.T) {
	// testImageConfig is a minimal valid image section reused by cases that
	// exercise fields other than "image".
	testImageConfig := ImageWithHealthcheckAndOptionalPort{
		ImageWithOptionalPort: ImageWithOptionalPort{
			Image: Image{
				ImageLocationOrBuild: ImageLocationOrBuild{
					Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
				},
			},
		},
	}
	testCases := map[string]struct {
		config               BackendService
		wantedErrorMsgPrefix string
		wantedError          error
	}{
		"error if fail to validate image": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: ImageWithHealthcheckAndOptionalPort{
						ImageWithOptionalPort: ImageWithOptionalPort{
							Image: Image{
								ImageLocationOrBuild: ImageLocationOrBuild{
									Build:    BuildArgsOrString{BuildString: aws.String("mockBuild")},
									Location: aws.String("mockLocation"),
								},
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "image": `,
		},
		"error if fail to validate sidecars": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: DependsOn{
								"foo": "bar",
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "sidecars[foo]": `,
		},
		"error if fail to validate network": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: (*PlacementString)(aws.String("")),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "network": `,
		},
		"error if fail to validate publish config": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "publish": `,
		},
		"error if fail to validate taskdef override": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					TaskDefOverrides: []OverrideRule{
						{
							Path: "Family",
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "taskdef_overrides[0]": `,
		},
		"error if name is not set": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
				},
			},
			wantedError: fmt.Errorf(`"name" must be specified`),
		},
		"error if fail to validate dependencies": {
			config: BackendService{
				Workload: Workload{Name: aws.String("mockName")},
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: map[string]string{"bar": "start"},
						},
						"bar": {
							DependsOn: map[string]string{"foo": "start"},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate container dependencies: `,
		},
		"error if fail to validate Windows": {
			config: BackendService{
				Workload: Workload{Name: aws.String("mockName")},
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/amd64"))},
						Storage: Storage{Volumes: map[string]*Volume{
							"foo": {
								EFS: EFSConfigOrBool{
									Enabled: aws.Bool(true),
								},
								MountPointOpts: MountPointOpts{
									ContainerPath: aws.String("mockPath"),
								},
							},
						},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate Windows: `,
		},
		"error if fail to validate ARM": {
			config: BackendService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux/arm64"))},
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot:         aws.Int(123),
								workloadType: manifestinfo.BackendServiceType,
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate ARM: `,
		},
		"error if fail to validate deployment": {
			config: BackendService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux/arm64"))},
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot:         aws.Int(123),
								workloadType: manifestinfo.BackendServiceType,
							},
						},
					},
					DeployConfig: DeploymentConfig{
						DeploymentControllerConfig: DeploymentControllerConfig{
							Rolling: aws.String("mockName"),
						}},
				},
			},
			wantedErrorMsgPrefix: `validate "deployment":`,
		},
		"error if fail to validate http": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					HTTP: HTTP{
						Main: RoutingRule{
							ProtocolVersion: aws.String("GRPC"),
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "http": "path" must be specified`,
		},
		"error if request scaling without http": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								workloadType: manifestinfo.BackendServiceType,
								Requests: ScalingConfigOrT[int]{
									Value: aws.Int(128),
								},
							},
						},
					},
				},
			},
			wantedError: errors.New(`"http" must be specified if "count.requests" or "count.response_time" are specified`),
		},
		"error if invalid topic is defined": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{
								Name: aws.String("mytopic.fifo"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "publish": `,
		},
		"error if target container not found": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					HTTP: HTTP{
						Main: RoutingRule{
							TargetContainer: aws.String("api"),
							Path:            aws.String("/"),
						},
					},
				},
				Workload: Workload{
					Name: aws.String("api"),
				},
			},
			wantedError: fmt.Errorf(`validate load balancer target for "http": target container "api" doesn't expose a port`),
		},
		"error if service connect is enabled without any port exposed": {
			config: BackendService{
				BackendServiceConfig: BackendServiceConfig{
					ImageConfig: testImageConfig,
					Network: NetworkConfig{
						Connect: ServiceConnectBoolOrArgs{
							ServiceConnectArgs: ServiceConnectArgs{
								Alias: aws.String("some alias"),
							},
						},
					},
				},
				Workload: Workload{
					Name: aws.String("api"),
				},
			},
			wantedError: fmt.Errorf(`cannot set "network.connect.alias" when no ports are exposed`),
		},
	}
	// wantedError is matched exactly; wantedErrorMsgPrefix is matched as a
	// prefix so cases don't need to spell out wrapped error chains.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestRequestDrivenWebService_validate ensures RequestDrivenWebService.validate
// rejects invalid image, instance, network, and observability settings, and
// enforces App Runner specific restrictions (e.g. no public placement).
func TestRequestDrivenWebService_validate(t *testing.T) {
	testCases := map[string]struct {
		config               RequestDrivenWebService
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"error if fail to validate image": {
			config: RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build:    BuildArgsOrString{BuildString: aws.String("mockBuild")},
								Location: aws.String("mockLocation"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "image": `,
		},
		"error if fail to validate instance": {
			config: RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
							},
						},
						Port: uint16P(80),
					},
					InstanceConfig: AppRunnerInstanceConfig{
						CPU:    nil,
						Memory: nil,
						Platform: PlatformArgsOrString{
							PlatformString: (*PlatformString)(aws.String("mockPlatform")),
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "platform": `,
		},
		"error if fail to validate network": {
			config: RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
							},
						},
						Port: uint16P(80),
					},
					Network: RequestDrivenWebServiceNetworkConfig{
						VPC: rdwsVpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: (*PlacementString)(aws.String("")),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "network": `,
		},
		"error if fail to validate observability": {
			config: RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: stringP("mockLocation"),
							},
						},
						Port: uint16P(80),
					},
					Observability: Observability{
						Tracing: aws.String("unknown-vendor"),
					},
				},
			},
			wantedErrorMsgPrefix: `validate "observability": `,
		},
		"error if name is not set": {
			config: RequestDrivenWebService{
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
							},
						},
						Port: uint16P(80),
					},
				},
			},
			wantedError: fmt.Errorf(`"name" must be specified`),
		},
		"error if placement is not private": {
			config: RequestDrivenWebService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
					ImageConfig: ImageWithPort{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: stringP("mockLocation"),
							},
						},
						Port: uint16P(80),
					},
					Network: RequestDrivenWebServiceNetworkConfig{
						VPC: rdwsVpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
			},
			wantedError: fmt.Errorf(`placement "public" is not supported for Request-Driven Web Service`),
		},
	}
	// wantedError is matched exactly; wantedErrorMsgPrefix is matched as a prefix.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestWorkerService_validate ensures WorkerService.validate returns the
// expected error for each misconfigured manifest section (image, sidecars,
// network, subscribe, publish, taskdef overrides, dependencies, platform,
// deployment, and service connect).
func TestWorkerService_validate(t *testing.T) {
	// testImageConfig is a minimal valid image section reused by cases that
	// exercise fields other than "image".
	testImageConfig := ImageWithHealthcheck{
		Image: Image{
			ImageLocationOrBuild: ImageLocationOrBuild{
				Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
			},
		},
	}
	testCases := map[string]struct {
		config               WorkerService
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"error if fail to validate image": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build:    BuildArgsOrString{BuildString: aws.String("mockBuild")},
								Location: aws.String("mockLocation"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "image": `,
		},
		"error if fail to validate sidecars": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: DependsOn{
								"foo": "bar",
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "sidecars[foo]": `,
		},
		"error if fail to validate network": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: (*PlacementString)(aws.String("")),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "network": `,
		},
		"error if fail to validate subscribe": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name: aws.String("mockTopic"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "subscribe": `,
		},
		"error if fail to validate publish": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "publish": `,
		},
		"error if fail to validate taskdef override": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					TaskDefOverrides: []OverrideRule{
						{
							Path: "Family",
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "taskdef_overrides[0]": `,
		},
		"error if name is not set": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
				},
			},
			wantedError: fmt.Errorf(`"name" must be specified`),
		},
		"error if fail to validate dependencies": {
			config: WorkerService{
				Workload: Workload{Name: aws.String("mockWorkload")},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: map[string]string{"bar": "start"},
						},
						"bar": {
							DependsOn: map[string]string{"foo": "start"},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate container dependencies: `,
		},
		"error if fail to validate windows": {
			config: WorkerService{
				Workload: Workload{Name: aws.String("mockName")},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/amd64"))},
						Storage: Storage{Volumes: map[string]*Volume{
							"foo": {
								EFS: EFSConfigOrBool{
									Enabled: aws.Bool(true),
								},
								MountPointOpts: MountPointOpts{
									ContainerPath: aws.String("mockPath"),
								},
							},
						},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate Windows: `,
		},
		"error if fail to validate ARM": {
			config: WorkerService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux/arm64"))},
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot:         aws.Int(123),
								workloadType: manifestinfo.WorkerServiceType,
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate ARM: `,
		},
		"error if fail to validate deployment": {
			config: WorkerService{
				Workload: Workload{
					Name: aws.String("mockName"),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux/arm64"))},
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot:         aws.Int(123),
								workloadType: manifestinfo.WorkerServiceType,
							},
						},
					},
					DeployConfig: WorkerDeploymentConfig{
						DeploymentControllerConfig: DeploymentControllerConfig{
							Rolling: aws.String("mockName"),
						}},
				},
			},
			wantedErrorMsgPrefix: `validate "deployment":`,
		},
		"error if service connect is enabled without any port exposed": {
			config: WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: testImageConfig,
					Network: NetworkConfig{
						Connect: ServiceConnectBoolOrArgs{
							ServiceConnectArgs: ServiceConnectArgs{
								Alias: aws.String("some alias"),
							},
						},
					},
				},
				Workload: Workload{
					Name: aws.String("api"),
				},
			},
			wantedError: fmt.Errorf(`cannot set "network.connect.alias" when no ports are exposed`),
		},
	}
	// wantedError is matched exactly; wantedErrorMsgPrefix is matched as a prefix.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestScheduledJob_validate ensures ScheduledJob.validate returns the expected
// error for each misconfigured manifest section (image, sidecars, network,
// "on" trigger, publish, taskdef overrides, dependencies, and platform).
func TestScheduledJob_validate(t *testing.T) {
	// testImageConfig is a minimal valid image section reused by cases that
	// exercise fields other than "image".
	testImageConfig := ImageWithHealthcheck{
		Image: Image{
			ImageLocationOrBuild: ImageLocationOrBuild{
				Build: BuildArgsOrString{BuildString: aws.String("mockBuild")},
			},
		},
	}
	testCases := map[string]struct {
		config               ScheduledJob
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"error if fail to validate image": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build:    BuildArgsOrString{BuildString: aws.String("mockBuild")},
								Location: aws.String("mockLocation"),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "image": `,
		},
		"error if fail to validate sidecars": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: DependsOn{
								"foo": "bar",
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "sidecars[foo]": `,
		},
		"error if fail to validate network": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: (*PlacementString)(aws.String("")),
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "network": `,
		},
		"error if fail to validate on": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					On:          JobTriggerConfig{},
				},
			},
			wantedErrorMsgPrefix: `validate "on": `,
		},
		"error if fail to validate publish config": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					On: JobTriggerConfig{
						Schedule: aws.String("mockSchedule"),
					},
					PublishConfig: PublishConfig{
						Topics: []Topic{
							{},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "publish": `,
		},
		"error if fail to validate taskdef override": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					On: JobTriggerConfig{
						Schedule: aws.String("mockSchedule"),
					},
					TaskDefOverrides: []OverrideRule{
						{
							Path: "Family",
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "taskdef_overrides[0]": `,
		},
		"error if name is not set": {
			config: ScheduledJob{
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					On: JobTriggerConfig{
						Schedule: aws.String("mockSchedule"),
					},
				},
			},
			wantedError: fmt.Errorf(`"name" must be specified`),
		},
		"error if fail to validate dependencies": {
			config: ScheduledJob{
				Workload: Workload{Name: aws.String("mockWorkload")},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					On: JobTriggerConfig{
						Schedule: aws.String("mockSchedule"),
					},
					Sidecars: map[string]*SidecarConfig{
						"foo": {
							DependsOn: map[string]string{"bar": "start"},
						},
						"bar": {
							DependsOn: map[string]string{"foo": "start"},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate container dependencies: `,
		},
		"error if fail to validate windows": {
			config: ScheduledJob{
				Workload: Workload{Name: aws.String("mockName")},
				ScheduledJobConfig: ScheduledJobConfig{
					ImageConfig: testImageConfig,
					On: JobTriggerConfig{
						Schedule: aws.String("mockSchedule"),
					},
					TaskConfig: TaskConfig{
						Platform: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/amd64"))},
						Storage: Storage{Volumes: map[string]*Volume{
							"foo": {
								EFS: EFSConfigOrBool{
									Enabled: aws.Bool(true),
								},
								MountPointOpts: MountPointOpts{
									ContainerPath: aws.String("mockPath"),
								},
							},
						},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate Windows: `,
		},
	}
	// wantedError is matched exactly; wantedErrorMsgPrefix is matched as a prefix.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestPipelineManifest_validate checks that Pipeline.Validate enforces the
// 100-character name limit and recursively validates each stage's deployments.
func TestPipelineManifest_validate(t *testing.T) {
	tests := map[string]struct {
		in         Pipeline
		wantErr    error
		wantPrefix string
	}{
		"error if name exceeds 100 characters": {
			in: Pipeline{
				Name: "12345678902234567890323456789042345678905234567890623456789072345678908234567890923456789010234567890",
			},
			wantErr: errors.New("pipeline name '12345678902234567890323456789042345678905234567890623456789072345678908234567890923456789010234567890' must be shorter than 100 characters"),
		},
		"should validate pipeline stages": {
			in: Pipeline{
				Name: "release",
				Stages: []PipelineStage{
					{
						Name: "test",
						Deployments: map[string]*Deployment{
							"frontend": {
								DependsOn: []string{"backend"},
							},
						},
					},
				},
			},
			wantPrefix: `validate "deployments" for pipeline stage test:`,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			err := tt.in.Validate()
			// Exact error matches win; otherwise compare against the prefix.
			if tt.wantErr != nil {
				require.EqualError(t, err, tt.wantErr.Error())
				return
			}
			if tt.wantPrefix != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.wantPrefix)
				return
			}
			require.NoError(t, err)
		})
	}
}
// TestDeployments_validate checks that a deployment graph only references
// deployments that are actually declared.
func TestDeployments_validate(t *testing.T) {
	tests := map[string]struct {
		given   Deployments
		wantErr error
	}{
		"should return nil on empty deployments": {},
		"should return an error when a dependency does not exist": {
			given: map[string]*Deployment{
				"frontend": {
					DependsOn: []string{"backend"},
				},
			},
			wantErr: errors.New("dependency deployment named 'backend' of 'frontend' does not exist"),
		},
		"should return nil when all dependencies are present": {
			given: map[string]*Deployment{
				"frontend": {
					DependsOn: []string{"backend"},
				},
				"backend": nil,
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			err := tt.given.validate()
			switch {
			case tt.wantErr != nil:
				require.EqualError(t, err, tt.wantErr.Error())
			default:
				require.NoError(t, err)
			}
		})
	}
}
// TestImageWithPort_validate checks that an image configuration exposing a
// load-balanced container must declare a port.
func TestImageWithPort_validate(t *testing.T) {
	tests := map[string]struct {
		in      ImageWithPort
		wantErr error
	}{
		"error if port is not specified": {
			in: ImageWithPort{
				Image: Image{
					ImageLocationOrBuild: ImageLocationOrBuild{
						Location: aws.String("mockLocation"),
					},
				},
			},
			wantErr: fmt.Errorf(`"port" must be specified`),
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			err := tt.in.validate()
			switch {
			case tt.wantErr != nil:
				require.EqualError(t, err, tt.wantErr.Error())
			default:
				require.NoError(t, err)
			}
		})
	}
}
// TestImage_validate checks the mutual exclusivity of "build" and "location"
// and that the image-level "depends_on" map is validated.
func TestImage_validate(t *testing.T) {
	tests := map[string]struct {
		in         Image
		wantErr    error
		wantPrefix string
	}{
		"error if build and location both specified": {
			in: Image{
				ImageLocationOrBuild: ImageLocationOrBuild{
					Build: BuildArgsOrString{
						BuildString: aws.String("mockBuild"),
					},
					Location: aws.String("mockLocation"),
				},
			},
			wantErr: fmt.Errorf(`must specify one of "build" and "location"`),
		},
		"error if neither build nor location is specified": {
			in:      Image{},
			wantErr: fmt.Errorf(`must specify one of "build" and "location"`),
		},
		"error if fail to validate depends_on": {
			in: Image{
				ImageLocationOrBuild: ImageLocationOrBuild{
					Location: aws.String("mockLocation"),
				},
				DependsOn: DependsOn{
					"foo": "bar",
				},
			},
			wantPrefix: `validate "depends_on":`,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			err := tt.in.validate()
			// Exact error matches win; otherwise compare against the prefix.
			switch {
			case tt.wantErr != nil:
				require.EqualError(t, err, tt.wantErr.Error())
			case tt.wantPrefix != "":
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.wantPrefix)
			default:
				require.NoError(t, err)
			}
		})
	}
}
// TestDependsOn_validate checks that container dependency statuses are limited
// to the supported lifecycle conditions.
func TestDependsOn_validate(t *testing.T) {
	tests := map[string]struct {
		given   DependsOn
		wantErr error
	}{
		"should return an error if dependency status is invalid": {
			given: DependsOn{
				"foo": "bar",
			},
			wantErr: errors.New("container dependency status must be one of START, COMPLETE, SUCCESS or HEALTHY"),
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			err := tt.given.validate()
			switch {
			case tt.wantErr != nil:
				require.EqualError(t, err, tt.wantErr.Error())
			default:
				require.NoError(t, err)
			}
		})
	}
}
// TestRoutingRule_validate ensures RoutingRule.validate rejects malformed
// source IPs, unknown protocol versions, a missing path, hosted zones without
// aliases, invalid aliases, and listener rules with more than five conditions.
//
// Fix: the test case name previously misspelled "validate" as "valiadte".
func TestRoutingRule_validate(t *testing.T) {
	testCases := map[string]struct {
		RoutingRule          RoutingRule
		wantedErrorMsgPrefix string
		wantedError          error
	}{
		"error if one of allowed_source_ips is not valid": {
			RoutingRule: RoutingRule{
				Path: stringP("/"),
				AllowedSourceIps: []IPNet{
					IPNet("10.1.0.0/24"),
					IPNet("badIP"),
					IPNet("10.1.1.0/24"),
				},
			},
			wantedErrorMsgPrefix: `validate "allowed_source_ips[1]": `,
		},
		"error if protocol version is not valid": {
			RoutingRule: RoutingRule{
				Path:            stringP("/"),
				ProtocolVersion: aws.String("quic"),
			},
			wantedErrorMsgPrefix: `"version" field value 'quic' must be one of GRPC, HTTP1 or HTTP2`,
		},
		"error if path is missing": {
			RoutingRule: RoutingRule{
				ProtocolVersion: aws.String("GRPC"),
			},
			wantedErrorMsgPrefix: `"path" must be specified`,
		},
		// Protocol version comparison is case-insensitive.
		"should not error if protocol version is not uppercase": {
			RoutingRule: RoutingRule{
				Path:            stringP("/"),
				ProtocolVersion: aws.String("gRPC"),
			},
		},
		"error if hosted zone set without alias": {
			RoutingRule: RoutingRule{
				Path:       stringP("/"),
				HostedZone: aws.String("ABCD1234"),
			},
			wantedErrorMsgPrefix: `"alias" must be specified if "hosted_zone" is specified`,
		},
		"error if one of alias is not valid": {
			RoutingRule: RoutingRule{
				Path: stringP("/"),
				Alias: Alias{
					AdvancedAliases: []AdvancedAlias{
						{
							HostedZone: aws.String("mockHostedZone"),
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "alias":`,
		},
		"error if fail to validate condition values per listener rule": {
			RoutingRule: RoutingRule{
				Path: stringP("/"),
				Alias: Alias{
					StringSliceOrString: StringSliceOrString{
						StringSlice: []string{
							"example.com",
							"v1.example.com",
							"v2.example.com",
							"v3.example.com",
							"v4.example.com",
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate condition values per listener rule: listener rule has more than five conditions example.com, v1.example.com, v2.example.com, v3.example.com and v4.example.com `),
		},
		"error if fail to validate condition values for advanced aliases": {
			RoutingRule: RoutingRule{
				Path: stringP("/"),
				Alias: Alias{
					AdvancedAliases: []AdvancedAlias{
						{
							Alias: aws.String("example.com"),
						},
						{
							Alias: aws.String("v1.example.com"),
						},
						{
							Alias: aws.String("v2.example.com"),
						},
						{
							Alias: aws.String("v3.example.com"),
						},
						{
							Alias: aws.String("v4.example.com"),
						},
					},
				},
			},
			wantedError: fmt.Errorf(`validate condition values per listener rule: listener rule has more than five conditions example.com, v1.example.com, v2.example.com, v3.example.com and v4.example.com `),
		},
	}
	// wantedError is matched exactly; wantedErrorMsgPrefix is matched as a prefix.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.RoutingRule.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestHTTP_validate checks HTTP.validate: a nil configuration is accepted,
// "target_container" and the deprecated camel-case "targetContainer" are
// mutually exclusive, and both the main and additional routing rules must
// carry a path.
func TestHTTP_validate(t *testing.T) {
	tests := map[string]struct {
		in         HTTP
		wantPrefix string
		wantErr    error
	}{
		"return if routing rule configuration is nil": {},
		"error if both target_container and targetContainer are specified": {
			in: HTTP{
				Main: RoutingRule{
					Path:            stringP("/"),
					TargetContainer: aws.String("mockContainer"),
				},
				TargetContainerCamelCase: aws.String("mockContainer"),
			},
			wantErr: fmt.Errorf(`must specify one, not both, of "target_container" and "targetContainer"`),
		},
		"error if the main routing rule is invalid": {
			in: HTTP{
				Main: RoutingRule{
					TargetContainer: aws.String("mockContainer"),
				},
			},
			wantErr: fmt.Errorf(`"path" must be specified`),
		},
		"error if the additional routing rule is invalid": {
			in: HTTP{
				Main: RoutingRule{
					Path:            stringP("/"),
					TargetContainer: aws.String("mockContainer"),
				},
				AdditionalRoutingRules: []RoutingRule{
					{
						TargetContainer: aws.String("mockContainer"),
					},
				},
			},
			wantErr: fmt.Errorf(`validate "additional_rules[0]": "path" must be specified`),
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			err := tt.in.validate()
			// Exact error matches win; otherwise compare against the prefix.
			switch {
			case tt.wantErr != nil:
				require.EqualError(t, err, tt.wantErr.Error())
			case tt.wantPrefix != "":
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.wantPrefix)
			default:
				require.NoError(t, err)
			}
		})
	}
}
// TestNetworkLoadBalancerConfiguration_validate ensures NLB config validation:
// the main listener must declare a parseable port, only TCP and TLS protocols
// are accepted (UDP and TCP_UDP are rejected), additional listeners require a
// main listener and are validated with indexed error prefixes, and
// "hosted_zone" is not supported on NLB aliases.
func TestNetworkLoadBalancerConfiguration_validate(t *testing.T) {
	testCases := map[string]struct {
		nlb                  NetworkLoadBalancerConfiguration
		wantedErrorMsgPrefix string
		wantedError          error
	}{
		"success if empty": {
			nlb: NetworkLoadBalancerConfiguration{},
		},
		"error if port unspecified": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					TargetContainer: aws.String("main"),
				},
			},
			wantedError: fmt.Errorf(`"port" must be specified`),
		},
		"error if port unspecified in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port:            aws.String("80/tcp"),
					TargetContainer: aws.String("main"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						TargetContainer: aws.String("main"),
					},
				},
			},
			wantedError: fmt.Errorf(`validate "additional_listeners[0]": "port" must be specified`),
		},
		"error parsing port": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("sabotage/this/string"),
				},
			},
			wantedErrorMsgPrefix: `validate "nlb": `,
			wantedError:          fmt.Errorf(`validate "port": cannot parse port mapping from sabotage/this/string`),
		},
		"error parsing port for additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("80/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("81/tcp"),
					},
					{
						Port: aws.String("sabotage/this/string"),
					},
				},
			},
			wantedErrorMsgPrefix: `validate "nlb": `,
			wantedError:          fmt.Errorf(`validate "additional_listeners[1]": validate "port": cannot parse port mapping from sabotage/this/string`),
		},
		// A bare port with no protocol suffix defaults to a valid listener.
		"success if port is specified without protocol": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443"),
				},
			},
		},
		"success if port is specified without protocol in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("443"),
					},
				},
			},
		},
		"fail if protocol is not recognized": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tps"),
				},
			},
			wantedErrorMsgPrefix: `validate "nlb": `,
			wantedError:          fmt.Errorf(`validate "port": invalid protocol tps; valid protocols include TCP and TLS`),
		},
		"fail if protocol is not recognized in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("443/tps"),
					},
				},
			},
			wantedErrorMsgPrefix: `validate "nlb": `,
			wantedError:          fmt.Errorf(`validate "additional_listeners[0]": validate "port": invalid protocol tps; valid protocols include TCP and TLS`),
		},
		"success if tcp": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
			},
		},
		"error if udp": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("161/udp"),
				},
			},
			wantedError: fmt.Errorf(`validate "port": invalid protocol udp; valid protocols include TCP and TLS`),
		},
		"error if udp in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("161/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("161/udp"),
					},
				},
			},
			wantedError: fmt.Errorf(`validate "additional_listeners[0]": validate "port": invalid protocol udp; valid protocols include TCP and TLS`),
		},
		"error if additional listeners are defined before main listener": {
			nlb: NetworkLoadBalancerConfiguration{
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("161/udp"),
					},
				},
			},
			wantedError: fmt.Errorf(`"port" must be specified`),
		},
		"success if tls": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tls"),
				},
			},
		},
		"success if tls in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("443/tls"),
					},
				},
			},
		},
		// Protocol matching is case-insensitive; TCP_udp is still rejected.
		"error if tcp_udp": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/TCP_udp"),
				},
			},
			wantedError: fmt.Errorf(`validate "port": invalid protocol TCP_udp; valid protocols include TCP and TLS`),
		},
		"error if tcp_udp in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("443/TCP_udp"),
					},
				},
			},
			wantedError: fmt.Errorf(`validate "additional_listeners[0]": validate "port": invalid protocol TCP_udp; valid protocols include TCP and TLS`),
		},
		"error if hosted zone is set": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
				Aliases: Alias{
					AdvancedAliases: []AdvancedAlias{
						{
							Alias:      aws.String("mockAlias"),
							HostedZone: aws.String("mockHostedZone"),
						},
					},
				},
			},
			wantedError: fmt.Errorf(`"hosted_zone" is not supported for Network Load Balancer`),
		},
		"error if hosted zone is set in additional listeners": {
			nlb: NetworkLoadBalancerConfiguration{
				Listener: NetworkLoadBalancerListener{
					Port: aws.String("443/tcp"),
				},
				AdditionalListeners: []NetworkLoadBalancerListener{
					{
						Port: aws.String("80/tcp"),
					},
				},
				Aliases: Alias{
					AdvancedAliases: []AdvancedAlias{
						{
							Alias:      aws.String("mockAlias"),
							HostedZone: aws.String("mockHostedZone"),
						},
					},
				},
			},
			wantedError: fmt.Errorf(`"hosted_zone" is not supported for Network Load Balancer`),
		},
	}
	// wantedError is matched exactly; wantedErrorMsgPrefix is matched as a prefix.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.nlb.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestAdvancedAlias_validate verifies that an advanced alias entry requires a name.
func TestAdvancedAlias_validate(t *testing.T) {
	tests := map[string]struct {
		alias     AdvancedAlias
		wantedErr error
	}{
		"should return an error if name is not specified": {
			alias: AdvancedAlias{
				HostedZone: aws.String("ABCD123"),
			},
			wantedErr: errors.New(`"name" must be specified`),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.alias.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestIPNet_validate verifies that malformed CIDR strings are rejected.
func TestIPNet_validate(t *testing.T) {
	tests := map[string]struct {
		ipNet     IPNet
		wantedErr error
	}{
		"should return an error if IPNet is not valid": {
			ipNet:     IPNet("badIPNet"),
			wantedErr: errors.New("parse IPNet badIPNet: invalid CIDR address: badIPNet"),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.ipNet.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestTaskConfig_validate covers validation of task-level configuration:
// the platform, count, and storage sections are each expected to surface
// their nested validation errors, and env files must end in ".env".
func TestTaskConfig_validate(t *testing.T) {
	perc := Percentage(70)
	mockConfig := ScalingConfigOrT[Percentage]{
		Value: &perc,
	}
	testCases := map[string]struct {
		TaskConfig           TaskConfig
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"error if fail to validate platform": {
			TaskConfig: TaskConfig{
				Platform: PlatformArgsOrString{
					PlatformString: (*PlatformString)(aws.String("")),
				},
			},
			wantedErrorMsgPrefix: `validate "platform": `,
		},
		"error if fail to validate count": {
			TaskConfig: TaskConfig{
				Count: Count{
					AdvancedCount: AdvancedCount{
						Spot: aws.Int(123),
						CPU:  mockConfig,
					},
				},
			},
			wantedErrorMsgPrefix: `validate "count": `,
		},
		"error if fail to validate storage": {
			TaskConfig: TaskConfig{
				Storage: Storage{
					Volumes: map[string]*Volume{
						"foo": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									UID:          aws.Uint32(123),
									FileSystemID: aws.String("mockID"),
								},
							},
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "storage": `,
		},
		"error if invalid env file": {
			TaskConfig: TaskConfig{
				EnvFile: aws.String("foo"),
			},
			wantedError: fmt.Errorf("environment file foo must have a .env file extension"),
		},
	}
	// Exact errors are matched with EqualError; nested errors only by prefix.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.TaskConfig.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestPlatformArgsOrString_validate covers the two mutually exclusive ways of
// declaring a platform: a single "os/arch" string, or separate osfamily and
// architecture args (which must be provided together and form a valid pair).
func TestPlatformArgsOrString_validate(t *testing.T) {
	testCases := map[string]struct {
		in     PlatformArgsOrString
		wanted error
	}{
		"error if platform string is invalid": {
			in:     PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("foobar/amd64"))},
			wanted: fmt.Errorf("platform 'foobar/amd64' is invalid; valid platforms are: linux/amd64, linux/x86_64, linux/arm, linux/arm64, windows/amd64 and windows/x86_64"),
		},
		"error if only half of platform string is specified": {
			in:     PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux"))},
			wanted: fmt.Errorf("platform 'linux' must be in the format [OS]/[Arch]"),
		},
		"error if only osfamily is specified": {
			in: PlatformArgsOrString{
				PlatformArgs: PlatformArgs{
					OSFamily: aws.String("linux"),
				},
			},
			wanted: fmt.Errorf(`fields "osfamily" and "architecture" must either both be specified or both be empty`),
		},
		"error if only architecture is specified": {
			in: PlatformArgsOrString{
				PlatformArgs: PlatformArgs{
					Arch: aws.String("X86_64"),
				},
			},
			wanted: fmt.Errorf(`fields "osfamily" and "architecture" must either both be specified or both be empty`),
		},
		"error if osfamily is invalid": {
			in: PlatformArgsOrString{
				PlatformArgs: PlatformArgs{
					OSFamily: aws.String("foo"),
					Arch:     aws.String("amd64"),
				},
			},
			wanted: fmt.Errorf("platform pair ('foo', 'amd64') is invalid: fields ('osfamily', 'architecture') must be one of ('linux', 'x86_64'), ('linux', 'amd64'), ('linux', 'arm'), ('linux', 'arm64'), ('windows', 'x86_64'), ('windows', 'amd64'), ('windows_server_2019_core', 'x86_64'), ('windows_server_2019_core', 'amd64'), ('windows_server_2019_full', 'x86_64'), ('windows_server_2019_full', 'amd64'), ('windows_server_2022_core', 'x86_64'), ('windows_server_2022_core', 'amd64'), ('windows_server_2022_full', 'x86_64'), ('windows_server_2022_full', 'amd64')"),
		},
		"error if arch is invalid": {
			in: PlatformArgsOrString{
				PlatformArgs: PlatformArgs{
					OSFamily: aws.String("linux"),
					Arch:     aws.String("bar"),
				},
			},
			wanted: fmt.Errorf("platform pair ('linux', 'bar') is invalid: fields ('osfamily', 'architecture') must be one of ('linux', 'x86_64'), ('linux', 'amd64'), ('linux', 'arm'), ('linux', 'arm64'), ('windows', 'x86_64'), ('windows', 'amd64'), ('windows_server_2019_core', 'x86_64'), ('windows_server_2019_core', 'amd64'), ('windows_server_2019_full', 'x86_64'), ('windows_server_2019_full', 'amd64'), ('windows_server_2022_core', 'x86_64'), ('windows_server_2022_core', 'amd64'), ('windows_server_2022_full', 'x86_64'), ('windows_server_2022_full', 'amd64')"),
		},
		"return nil if platform string valid": {
			in: PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("linux/amd64"))},
		},
		"return nil if platform args valid": {
			in: PlatformArgsOrString{
				PlatformArgs: PlatformArgs{
					OSFamily: aws.String("linux"),
					Arch:     aws.String("amd64"),
				},
			},
			wanted: nil,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wanted != nil {
				require.EqualError(t, err, tc.wanted.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestScalingConfigOrT_validate verifies that a scaling config may be given
// either as a plain target value or as an advanced config with cooldowns.
func TestScalingConfigOrT_validate(t *testing.T) {
	var (
		// Named cooldownTime instead of `time` so the local does not shadow
		// the time package (the original shadowed it for the function body).
		cooldownTime = 60 * time.Second
		perc         = Percentage(70)
	)
	testCases := map[string]struct {
		ScalingConfig        ScalingConfigOrT[Percentage]
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"valid if only value is specified": {
			ScalingConfig: ScalingConfigOrT[Percentage]{
				Value: &perc,
			},
		},
		"valid if only scaling config is specified": {
			ScalingConfig: ScalingConfigOrT[Percentage]{
				ScalingConfig: AdvancedScalingConfig[Percentage]{
					Value: &perc,
					Cooldown: Cooldown{
						ScaleInCooldown:  &cooldownTime,
						ScaleOutCooldown: &cooldownTime,
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.ScalingConfig.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestAdvancedCount_validate covers the autoscaling "count" rules: which
// autoscaling fields are allowed per workload type, that "spot" and
// autoscaling are mutually exclusive, that "range" and at least one scaling
// metric must be specified together, and that nested metric/queue configs
// surface their own validation errors.
func TestAdvancedCount_validate(t *testing.T) {
	var (
		perc         = Percentage(70)
		invalidPerc  = Percentage(-1) // out of the 0-100 range, used to trigger nested errors
		timeMinute   = time.Second * 60
		mockConfig   = ScalingConfigOrT[Percentage]{
			Value: &perc,
		}
		invalidConfig = ScalingConfigOrT[Percentage]{
			Value: &invalidPerc,
		}
		mockCooldown = Cooldown{
			ScaleInCooldown: &timeMinute,
		}
		mockAdvancedInvConfig = ScalingConfigOrT[Percentage]{
			ScalingConfig: AdvancedScalingConfig[Percentage]{
				Value:    &invalidPerc,
				Cooldown: mockCooldown,
			},
		}
	)
	testCases := map[string]struct {
		AdvancedCount        AdvancedCount
		wantedError          error
		wantedErrorMsgPrefix string
	}{
		"error if invalid autoscaling fields set": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				},
				CPU: mockConfig,
				QueueScaling: QueueScaling{
					AcceptableLatency: durationp(10 * time.Second),
					AvgProcessingTime: durationp(1 * time.Second),
				},
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedError: fmt.Errorf(`autoscaling field "queue_delay" is invalid with workload type Load Balanced Web Service`),
		},
		"error if multiple invalid autoscaling fields set": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				},
				CPU: mockConfig,
				QueueScaling: QueueScaling{
					AcceptableLatency: durationp(10 * time.Second),
					AvgProcessingTime: durationp(1 * time.Second),
				},
				Requests: ScalingConfigOrT[int]{
					Value: aws.Int(10),
				},
				ResponseTime: ScalingConfigOrT[time.Duration]{
					Value: &timeMinute,
				},
				workloadType: manifestinfo.WorkerServiceType,
			},
			wantedError: fmt.Errorf(`autoscaling fields "requests" and "response_time" are invalid with workload type Worker Service`),
		},
		"cannot have autoscaling for scheduled jobs": {
			AdvancedCount: AdvancedCount{
				Spot:         aws.Int(42),
				workloadType: manifestinfo.ScheduledJobType,
			},
			wantedError: errors.New("cannot have autoscaling options for workloads of type 'Scheduled Job'"),
		},
		"valid if only spot is specified": {
			AdvancedCount: AdvancedCount{
				Spot:         aws.Int(42),
				workloadType: manifestinfo.BackendServiceType,
			},
		},
		"valid when range and and at least one autoscaling fields are specified": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				},
				CPU: mockConfig,
				QueueScaling: QueueScaling{
					AcceptableLatency: durationp(10 * time.Second),
					AvgProcessingTime: durationp(1 * time.Second),
				},
				workloadType: manifestinfo.WorkerServiceType,
			},
		},
		"error if both spot and autoscaling fields are specified": {
			AdvancedCount: AdvancedCount{
				Spot:         aws.Int(123),
				CPU:          mockConfig,
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedError: fmt.Errorf(`must specify one, not both, of "spot" and "range/cpu_percentage/memory_percentage/requests/response_time"`),
		},
		"error if fail to validate range": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("")),
				},
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedErrorMsgPrefix: `validate "range": `,
		},
		"error if range is missing when autoscaling fields are set for Load Balanced Web Service": {
			AdvancedCount: AdvancedCount{
				Requests: ScalingConfigOrT[int]{
					Value: aws.Int(123),
				},
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedError: fmt.Errorf(`"range" must be specified if "cpu_percentage", "memory_percentage", "requests" or "response_time" are specified`),
		},
		"error if range is specified but no autoscaling fields are specified for a Load Balanced Web Service": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				},
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedError: fmt.Errorf(`must specify at least one of "cpu_percentage", "memory_percentage", "requests" or "response_time" if "range" is specified`),
		},
		"error if range is specified but no autoscaling fields are specified for a Backend Service": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				},
				workloadType: manifestinfo.BackendServiceType,
			},
			wantedError: fmt.Errorf(`must specify at least one of "cpu_percentage", "memory_percentage", "requests" or "response_time" if "range" is specified`),
		},
		"error if range is specified but no autoscaling fields are specified for a Worker Service": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(aws.String("1-10")),
				},
				workloadType: manifestinfo.WorkerServiceType,
			},
			wantedError: fmt.Errorf(`must specify at least one of "cpu_percentage", "memory_percentage" or "queue_delay" if "range" is specified`),
		},
		"error if cooldown is specified but no autoscaling fields are specified for a Load Balanced Web Service": {
			AdvancedCount: AdvancedCount{
				Cooldown:     mockCooldown,
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedError: fmt.Errorf(`must specify at least one of "cpu_percentage", "memory_percentage", "requests" or "response_time" if "cooldown" is specified`),
		},
		"error if cooldown is specified but no autoscaling fields are specified for a Backend Service": {
			AdvancedCount: AdvancedCount{
				Cooldown:     mockCooldown,
				workloadType: manifestinfo.BackendServiceType,
			},
			wantedError: fmt.Errorf(`must specify at least one of "cpu_percentage", "memory_percentage", "requests" or "response_time" if "cooldown" is specified`),
		},
		"error if cooldown is specified but no autoscaling fields are specified for a Worker Service": {
			AdvancedCount: AdvancedCount{
				Cooldown:     mockCooldown,
				workloadType: manifestinfo.WorkerServiceType,
			},
			wantedError: fmt.Errorf(`must specify at least one of "cpu_percentage", "memory_percentage" or "queue_delay" if "cooldown" is specified`),
		},
		"error if range is missing when autoscaling fields are set for Backend Service": {
			AdvancedCount: AdvancedCount{
				CPU:          mockConfig,
				workloadType: manifestinfo.BackendServiceType,
			},
			wantedError: fmt.Errorf(`"range" must be specified if "cpu_percentage", "memory_percentage", "requests" or "response_time" are specified`),
		},
		"error if range is missing when autoscaling fields are set for Worker Service": {
			AdvancedCount: AdvancedCount{
				CPU:          mockConfig,
				workloadType: manifestinfo.WorkerServiceType,
			},
			wantedError: fmt.Errorf(`"range" must be specified if "cpu_percentage", "memory_percentage" or "queue_delay" are specified`),
		},
		"wrap error from queue_delay on failure": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					RangeConfig: RangeConfig{
						Min:      aws.Int(1),
						Max:      aws.Int(10),
						SpotFrom: aws.Int(6),
					},
				},
				QueueScaling: QueueScaling{
					AcceptableLatency: nil,
					AvgProcessingTime: durationp(1 * time.Second),
				},
				workloadType: manifestinfo.WorkerServiceType,
			},
			wantedErrorMsgPrefix: `validate "queue_delay": `,
		},
		"error if CPU config is not valid": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(stringP("1-2")),
				},
				CPU:          invalidConfig,
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedErrorMsgPrefix: `validate "cpu_percentage": `,
		},
		"error if CPU advanced config is not valid": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(stringP("1-2")),
				},
				CPU:          mockAdvancedInvConfig,
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedErrorMsgPrefix: `validate "cpu_percentage": `,
		},
		"error if memory config is not valid": {
			AdvancedCount: AdvancedCount{
				Range: Range{
					Value: (*IntRangeBand)(stringP("1-2")),
				},
				Memory:       invalidConfig,
				workloadType: manifestinfo.LoadBalancedWebServiceType,
			},
			wantedErrorMsgPrefix: `validate "memory_percentage": `,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.AdvancedCount.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestPercentage_validate verifies percentages outside 0-100 are rejected.
func TestPercentage_validate(t *testing.T) {
	tests := map[string]struct {
		percentage Percentage
		wantedErr  error
	}{
		"should return an error if percentage is not valid": {
			percentage: Percentage(120),
			wantedErr:  errors.New("percentage value 120 must be an integer from 0 to 100"),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.percentage.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestQueueScaling_validate covers the "queue_delay" settings:
// acceptable_latency and msg_processing_time must be set together,
// processing time must be non-zero and no longer than the acceptable latency.
func TestQueueScaling_validate(t *testing.T) {
	testCases := map[string]struct {
		in     QueueScaling
		wanted error
	}{
		"should return an error if only msg_processing_time is specified": {
			in: QueueScaling{
				AvgProcessingTime: durationp(1 * time.Second),
			},
			wanted: errors.New(`"acceptable_latency" must be specified if "msg_processing_time" is specified`),
		},
		"should return an error if only acceptable_latency is specified": {
			in: QueueScaling{
				AcceptableLatency: durationp(1 * time.Second),
			},
			wanted: errors.New(`"msg_processing_time" must be specified if "acceptable_latency" is specified`),
		},
		"should return an error if the msg_processing_time is 0": {
			in: QueueScaling{
				AcceptableLatency: durationp(1 * time.Second),
				AvgProcessingTime: durationp(0 * time.Second),
			},
			wanted: errors.New(`"msg_processing_time" cannot be 0`),
		},
		"should return an error if the msg_processing_time is longer than acceptable_latency": {
			in: QueueScaling{
				AcceptableLatency: durationp(500 * time.Millisecond),
				AvgProcessingTime: durationp(1 * time.Second),
			},
			wanted: errors.New(`"msg_processing_time" cannot be longer than "acceptable_latency"`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wanted != nil {
				require.EqualError(t, err, tc.wanted.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestIntRangeBand_validate verifies range strings must follow the
// ${min}-${max} format and that min may not exceed max.
func TestIntRangeBand_validate(t *testing.T) {
	tests := map[string]struct {
		band      IntRangeBand
		wantedErr error
	}{
		"error if range value is in invalid format": {
			band:      IntRangeBand(""),
			wantedErr: fmt.Errorf("invalid range value : valid format is ${min}-${max}"),
		},
		"error if range min is greater than max": {
			band:      IntRangeBand("6-4"),
			wantedErr: fmt.Errorf("min value 6 cannot be greater than max value 4"),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.band.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestRangeConfig_validate verifies min/max/spot_from constraints on a
// structured range configuration.
func TestRangeConfig_validate(t *testing.T) {
	tests := map[string]struct {
		config    RangeConfig
		wantedErr error
	}{
		"error if max is not set": {
			config: RangeConfig{
				Min: aws.Int(2),
			},
			wantedErr: fmt.Errorf(`"min/max" must be specified`),
		},
		"error if range min is greater than max": {
			config: RangeConfig{
				Min: aws.Int(2),
				Max: aws.Int(1),
			},
			wantedErr: fmt.Errorf("min value 2 cannot be greater than max value 1"),
		},
		"error if spot_from value is negative": {
			config: RangeConfig{
				Min:      aws.Int(2),
				Max:      aws.Int(10),
				SpotFrom: aws.Int(-3),
			},
			wantedErr: fmt.Errorf("min value 2, max value 10, and spot_from value -3 must all be positive"),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestStorage_validate covers the "storage" section: ephemeral size bounds,
// propagation of per-volume validation errors, and the rule that at most one
// managed (bool-enabled) EFS volume is allowed per service.
func TestStorage_validate(t *testing.T) {
	testCases := map[string]struct {
		Storage              Storage
		wantedErrorMsgPrefix string
		wantedError          error
	}{
		"error if ephemeral is invalid": {
			Storage: Storage{
				Ephemeral: aws.Int(19),
			},
			wantedError: fmt.Errorf(`validate "ephemeral": ephemeral storage must be between 20 GiB and 200 GiB`),
		},
		"error if fail to validate volumes": {
			Storage: Storage{
				Volumes: map[string]*Volume{
					"foo": {
						EFS: EFSConfigOrBool{
							Enabled: aws.Bool(true),
						},
					},
				},
			},
			wantedErrorMsgPrefix: `validate "volumes[foo]": `,
		},
		"error if storage has more than one managed volume": {
			Storage: Storage{
				Volumes: map[string]*Volume{
					"foo": {
						EFS: EFSConfigOrBool{
							Enabled: aws.Bool(true),
						},
						MountPointOpts: MountPointOpts{
							ContainerPath: aws.String("mockPath"),
						},
					},
					"bar": {
						EFS: EFSConfigOrBool{
							Enabled: aws.Bool(true),
						},
						MountPointOpts: MountPointOpts{
							ContainerPath: aws.String("mockPath"),
						},
					},
				},
			},
			wantedError: fmt.Errorf("cannot specify more than one managed volume per service"),
		},
		"valid": {
			Storage: Storage{
				Volumes: map[string]*Volume{
					"foo": {
						EFS: EFSConfigOrBool{
							Enabled: aws.Bool(true),
						},
						MountPointOpts: MountPointOpts{
							ContainerPath: aws.String("mockPath"),
						},
					},
					"bar": {
						EFS: EFSConfigOrBool{
							Enabled: aws.Bool(false),
						},
						MountPointOpts: MountPointOpts{
							ContainerPath: aws.String("mockPath"),
						},
					},
					"foobar": {
						EFS: EFSConfigOrBool{
							Advanced: EFSVolumeConfiguration{
								FileSystemID: aws.String("fs-1234567"),
							},
						},
						MountPointOpts: MountPointOpts{
							ContainerPath: aws.String("mockPath"),
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.Storage.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
				return
			}
			if tc.wantedErrorMsgPrefix != "" {
				require.Error(t, gotErr)
				require.Contains(t, gotErr.Error(), tc.wantedErrorMsgPrefix)
				return
			}
			require.NoError(t, gotErr)
		})
	}
}
// TestVolume_validate verifies that EFS configuration errors inside a volume
// are wrapped with the "efs" field name.
func TestVolume_validate(t *testing.T) {
	tests := map[string]struct {
		volume          Volume
		wantedErrPrefix string
	}{
		"error if fail to validate efs": {
			volume: Volume{
				EFS: EFSConfigOrBool{
					Advanced: EFSVolumeConfiguration{
						UID:           aws.Uint32(123),
						RootDirectory: aws.String("mockDir"),
					},
				},
			},
			wantedErrPrefix: `validate "efs": `,
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.volume.validate()
			if tc.wantedErrPrefix == "" {
				require.NoError(t, gotErr)
				return
			}
			require.Contains(t, gotErr.Error(), tc.wantedErrPrefix)
		})
	}
}
// TestEFSVolumeConfiguration_validate covers the advanced EFS settings:
// uid/gid are mutually exclusive with id/root_dir/auth, must be set as a
// pair, uid may not be 0, access points require IAM auth with a root
// directory of "/" or empty, and root_dir characters are restricted.
func TestEFSVolumeConfiguration_validate(t *testing.T) {
	testCases := map[string]struct {
		EFSVolumeConfiguration EFSVolumeConfiguration
		wantedError            error
	}{
		"error if uid/gid are specified with id/root_dir/auth": {
			EFSVolumeConfiguration: EFSVolumeConfiguration{
				UID:        aws.Uint32(123),
				AuthConfig: AuthorizationConfig{IAM: aws.Bool(true)},
			},
			wantedError: fmt.Errorf(`must specify one, not both, of "uid/gid" and "id/root_dir/auth"`),
		},
		"error if uid is set but gid is not": {
			EFSVolumeConfiguration: EFSVolumeConfiguration{
				UID: aws.Uint32(123),
			},
			wantedError: fmt.Errorf(`"gid" must be specified if "uid" is specified`),
		},
		"error if gid is set but uid is not": {
			EFSVolumeConfiguration: EFSVolumeConfiguration{
				GID: aws.Uint32(123),
			},
			wantedError: fmt.Errorf(`"uid" must be specified if "gid" is specified`),
		},
		"error if uid is 0": {
			EFSVolumeConfiguration: EFSVolumeConfiguration{
				UID: aws.Uint32(0),
				GID: aws.Uint32(0),
			},
			wantedError: fmt.Errorf(`"uid" must not be 0`),
		},
		"error if AuthorizationConfig is not configured correctly": {
			EFSVolumeConfiguration: EFSVolumeConfiguration{
				AuthConfig: AuthorizationConfig{
					AccessPointID: aws.String("mockID"),
				},
				RootDirectory: aws.String("mockDir"),
			},
			wantedError: fmt.Errorf(`"root_dir" must be either empty or "/" and "auth.iam" must be true when "access_point_id" is used`),
		},
		"error if root_dir is invalid": {
			EFSVolumeConfiguration: EFSVolumeConfiguration{
				RootDirectory: aws.String("!!!!"),
			},
			wantedError: fmt.Errorf(`validate "root_dir": path can only contain the characters a-zA-Z0-9.-_/`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.EFSVolumeConfiguration.validate()
			if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestSidecarConfig_validate covers sidecar configuration: mount point and
// depends_on errors are wrapped with their field names, and env files must
// have a .env extension.
func TestSidecarConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		config            SidecarConfig
		wantedErrorPrefix string
	}{
		"error if fail to validate mount_points": {
			config: SidecarConfig{
				MountPoints: []SidecarMountPoint{
					{},
				},
			},
			wantedErrorPrefix: `validate "mount_points[0]": `,
		},
		"error if fail to validate depends_on": {
			config: SidecarConfig{
				DependsOn: DependsOn{
					"foo": "bar",
				},
			},
			wantedErrorPrefix: `validate "depends_on": `,
		},
		"error if invalid env file": {
			config: SidecarConfig{
				EnvFile: aws.String("foo"),
			},
			wantedErrorPrefix: `environment file foo must`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrorPrefix != "" {
				require.Contains(t, gotErr.Error(), tc.wantedErrorPrefix)
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestSidecarMountPoint_validate verifies a sidecar mount point requires a
// source volume.
func TestSidecarMountPoint_validate(t *testing.T) {
	tests := map[string]struct {
		mountPoint SidecarMountPoint
		wantedErr  error
	}{
		"should return an error if source_volume is not set": {
			mountPoint: SidecarMountPoint{},
			wantedErr:  errors.New(`"source_volume" must be specified`),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.mountPoint.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestMountPointOpts_validate verifies a container path is required and may
// only contain a restricted character set.
func TestMountPointOpts_validate(t *testing.T) {
	tests := map[string]struct {
		opts      MountPointOpts
		wantedErr error
	}{
		"should return an error if path is not set": {
			opts:      MountPointOpts{},
			wantedErr: errors.New(`"path" must be specified`),
		},
		"should return an error if path is invalid": {
			opts: MountPointOpts{
				ContainerPath: aws.String("!!!!!!"),
			},
			wantedErr: errors.New(`validate "path": path can only contain the characters a-zA-Z0-9.-_/`),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.opts.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestNetworkConfig_validate verifies that vpc validation errors are wrapped
// with the "vpc" field name.
func TestNetworkConfig_validate(t *testing.T) {
	tests := map[string]struct {
		config          NetworkConfig
		wantedErrPrefix string
	}{
		"error if fail to validate vpc": {
			config: NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: (*PlacementString)(aws.String("")),
					},
				},
			},
			wantedErrPrefix: `validate "vpc": `,
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrPrefix == "" {
				require.NoError(t, gotErr)
				return
			}
			require.Contains(t, gotErr.Error(), tc.wantedErrPrefix)
		})
	}
}
// TestRequestDrivenWebServiceNetworkConfig_validate verifies that vpc
// validation errors for request-driven web services are wrapped with "vpc".
func TestRequestDrivenWebServiceNetworkConfig_validate(t *testing.T) {
	tests := map[string]struct {
		config          RequestDrivenWebServiceNetworkConfig
		wantedErrPrefix string
	}{
		"error if fail to validate vpc": {
			config: RequestDrivenWebServiceNetworkConfig{
				VPC: rdwsVpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: (*PlacementString)(aws.String("")),
					},
				},
			},
			wantedErrPrefix: `validate "vpc": `,
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrPrefix == "" {
				require.NoError(t, gotErr)
				return
			}
			require.Contains(t, gotErr.Error(), tc.wantedErrPrefix)
		})
	}
}
// TestRdwsVpcConfig_validate verifies that placement errors in a
// request-driven web service vpc config are wrapped with "placement".
func TestRdwsVpcConfig_validate(t *testing.T) {
	tests := map[string]struct {
		config          rdwsVpcConfig
		wantedErrPrefix string
	}{
		"error if fail to validate placement": {
			config: rdwsVpcConfig{
				Placement: PlacementArgOrString{
					PlacementString: (*PlacementString)(aws.String("")),
				},
			},
			wantedErrPrefix: `validate "placement": `,
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrPrefix == "" {
				require.NoError(t, gotErr)
				return
			}
			require.Contains(t, gotErr.Error(), tc.wantedErrPrefix)
		})
	}
}
// TestVpcConfig_validate verifies that placement errors in a vpc config are
// wrapped with the "placement" field name.
func TestVpcConfig_validate(t *testing.T) {
	tests := map[string]struct {
		config          vpcConfig
		wantedErrPrefix string
	}{
		"error if fail to validate placement": {
			config: vpcConfig{
				Placement: PlacementArgOrString{
					PlacementString: (*PlacementString)(aws.String("")),
				},
			},
			wantedErrPrefix: `validate "placement": `,
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrPrefix == "" {
				require.NoError(t, gotErr)
				return
			}
			require.Contains(t, gotErr.Error(), tc.wantedErrPrefix)
		})
	}
}
// TestPlacementString_validate verifies a placement string must be non-empty
// and one of "public" or "private".
func TestPlacementString_validate(t *testing.T) {
	emptyPlacement := PlacementString("")
	invalidPlacement := PlacementString("external")
	tests := map[string]struct {
		placement *PlacementString
		wantedErr error
	}{
		"should return an error if placement is empty": {
			placement: &emptyPlacement,
			wantedErr: errors.New(`"placement" cannot be empty`),
		},
		"should return an error if placement is invalid": {
			placement: &invalidPlacement,
			wantedErr: errors.New(`"placement" external must be one of public, private`),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.placement.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestAppRunnerInstanceConfig_validate covers App Runner-specific platform
// restrictions: Windows is unsupported, only amd64/x86_64 architectures are
// allowed, and invalid platform pairs surface the platform validation error.
func TestAppRunnerInstanceConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		config            AppRunnerInstanceConfig
		wantedErrorPrefix string
		wantedError       error
	}{
		"error if fail to validate platforms": {
			config: AppRunnerInstanceConfig{
				Platform: PlatformArgsOrString{
					PlatformString: (*PlatformString)(aws.String("")),
				},
			},
			wantedErrorPrefix: `validate "platform": `,
		},
		"error if windows os in PlatformString": {
			config: AppRunnerInstanceConfig{
				Platform: PlatformArgsOrString{
					PlatformString: (*PlatformString)(aws.String("windows/amd64")),
				},
			},
			wantedError: fmt.Errorf("Windows is not supported for App Runner services"),
		},
		"error if windows os in PlatformArgs": {
			config: AppRunnerInstanceConfig{
				CPU:    nil,
				Memory: nil,
				Platform: PlatformArgsOrString{
					PlatformArgs: PlatformArgs{
						OSFamily: aws.String("windows"),
						Arch:     aws.String("amd64"),
					},
				},
			},
			wantedError: fmt.Errorf("Windows is not supported for App Runner services"),
		},
		// Renamed from "...in PlatformString": this case configures PlatformArgs.
		"error if invalid arch in PlatformArgs": {
			config: AppRunnerInstanceConfig{
				Platform: PlatformArgsOrString{
					PlatformArgs: PlatformArgs{
						OSFamily: aws.String("linux"),
						Arch:     aws.String("leg64"),
					},
				},
			},
			wantedError: fmt.Errorf("validate \"platform\": platform pair ('linux', 'leg64') is invalid: fields ('osfamily', 'architecture') must be one of ('linux', 'x86_64'), ('linux', 'amd64'), ('linux', 'arm'), ('linux', 'arm64'), ('windows', 'x86_64'), ('windows', 'amd64'), ('windows_server_2019_core', 'x86_64'), ('windows_server_2019_core', 'amd64'), ('windows_server_2019_full', 'x86_64'), ('windows_server_2019_full', 'amd64'), ('windows_server_2022_core', 'x86_64'), ('windows_server_2022_core', 'amd64'), ('windows_server_2022_full', 'x86_64'), ('windows_server_2022_full', 'amd64')"),
		},
		"error if App Runner + ARM": {
			config: AppRunnerInstanceConfig{
				Platform: PlatformArgsOrString{
					PlatformString: (*PlatformString)(aws.String("linux/arm64")),
				},
			},
			wantedError: fmt.Errorf("App Runner services can only build on amd64 and x86_64 architectures"),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrorPrefix != "" {
				require.Contains(t, gotErr.Error(), tc.wantedErrorPrefix)
			} else if tc.wantedError != nil {
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestObservability_validate verifies the tracing vendor must be a known
// value ("awsxray") or unset.
func TestObservability_validate(t *testing.T) {
	tests := map[string]struct {
		config          Observability
		wantedErrPrefix string
	}{
		"error if tracing has invalid vendor": {
			config: Observability{
				Tracing: aws.String("unknown-vendor"),
			},
			wantedErrPrefix: `invalid tracing vendor unknown-vendor: `,
		},
		"ok if tracing is aws-xray": {
			config: Observability{
				Tracing: aws.String("awsxray"),
			},
		},
		"ok if observability is empty": {
			config: Observability{},
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrPrefix == "" {
				require.NoError(t, gotErr)
				return
			}
			require.NotNil(t, gotErr)
			require.Contains(t, gotErr.Error(), tc.wantedErrPrefix)
		})
	}
}
// TestJobTriggerConfig_validate verifies a job trigger requires a schedule.
func TestJobTriggerConfig_validate(t *testing.T) {
	tests := map[string]struct {
		config    *JobTriggerConfig
		wantedErr error
	}{
		"should return an error if schedule is empty": {
			config:    &JobTriggerConfig{},
			wantedErr: errors.New(`"schedule" must be specified`),
		},
	}
	for name, tc := range tests {
		tc := tc
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErr == nil {
				require.NoError(t, gotErr)
				return
			}
			require.EqualError(t, gotErr, tc.wantedErr.Error())
		})
	}
}
// TestPublishConfig_validate covers the "publish" section: each topic is
// validated in turn and failures are wrapped with the topic's index.
func TestPublishConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		config            PublishConfig
		wantedErrorPrefix string
	}{
		"error if fail to validate topics": {
			config: PublishConfig{
				Topics: []Topic{
					{},
				},
			},
			wantedErrorPrefix: `validate "topics[0]": `,
		},
		"error if empty topic name": {
			config: PublishConfig{
				Topics: []Topic{
					{
						Name: aws.String(""),
					},
				},
			},
			wantedErrorPrefix: `validate "topics[0]": `,
		},
		"error if invalid topic name": {
			config: PublishConfig{
				Topics: []Topic{
					{
						Name: aws.String("mytopic.lifo"),
					},
				},
			},
			wantedErrorPrefix: `validate "topics[0]": `,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			gotErr := tc.config.validate()
			if tc.wantedErrorPrefix != "" {
				require.Contains(t, gotErr.Error(), tc.wantedErrorPrefix)
			} else {
				require.NoError(t, gotErr)
			}
		})
	}
}
// TestTopic_validate covers SNS topic validation: a name is required and
// restricted to letters, numbers, underscores, and hyphens; FIFO may be
// enabled either as a bool or via advanced configuration.
func TestTopic_validate(t *testing.T) {
	testCases := map[string]struct {
		in     Topic
		wanted error
	}{
		"should return an error if name is empty": {
			in:     Topic{},
			wanted: errors.New(`"name" must be specified`),
		},
		"should return an error if name is not valid": {
			in: Topic{
				Name: aws.String("!@#"),
			},
			wanted: errors.New(`"name" can only contain letters, numbers, underscores, and hyphens`),
		},
		"should not return an error if name is valid": {
			in: Topic{
				Name: aws.String("validtopic"),
			},
			wanted: nil,
		},
		"should not return an error if name is valid with fifo enabled": {
			in: Topic{
				Name: aws.String("validtopic"),
				FIFO: FIFOTopicAdvanceConfigOrBool{
					Enable: aws.Bool(true),
				},
			},
			wanted: nil,
		},
		"should not return an error if name is valid with advanced fifo config": {
			in: Topic{
				Name: aws.String("validtopic"),
				FIFO: FIFOTopicAdvanceConfigOrBool{
					Advanced: FIFOTopicAdvanceConfig{
						ContentBasedDeduplication: aws.Bool(true),
					},
				},
			},
			wanted: nil,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wanted != nil {
				require.EqualError(t, err, tc.wanted.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestSubscribeConfig_validate verifies that topic subscription failures are
// reported with the index of the offending subscription.
func TestSubscribeConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		config            SubscribeConfig
		wantedErrorPrefix string
	}{
		"error if fail to validate topics": {
			config: SubscribeConfig{
				Topics: []TopicSubscription{
					{Name: aws.String("mockTopic")},
				},
			},
			wantedErrorPrefix: `validate "topics[0]": `,
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := tc.config.validate()
			if tc.wantedErrorPrefix == "" {
				require.NoError(t, err)
				return
			}
			require.Contains(t, err.Error(), tc.wantedErrorPrefix)
		})
	}
}
// TestTopicSubscription_validate covers subscription validation: topic and
// service naming rules, standard vs. FIFO queue settings, and the
// mutual-exclusion rules between "high_throughput", "deduplication_scope",
// and "throughput_limit" for FIFO queues.
func TestTopicSubscription_validate(t *testing.T) {
	// Shared duration used to populate the generic SQS queue knobs in each case.
	duration111Seconds := 111 * time.Second
	testCases := map[string]struct {
		in TopicSubscription
		wanted error
	}{
		"should return an error if topic name is empty": {
			in: TopicSubscription{},
			wanted: errors.New(`"name" must be specified`),
		},
		"should return an error if service is empty": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
			},
			wanted: errors.New(`"service" must be specified`),
		},
		"should return an error if service is in invalid format": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("!!!!!"),
			},
			wanted: errors.New("service name must start with a letter, contain only lower-case letters, numbers, and hyphens, and have no consecutive or trailing hyphen"),
		},
		"should not return an error if service is in valid format": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
			},
			wanted: nil,
		},
		"should not return error if standard queue is enabled": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Enabled: aws.Bool(true),
				},
			},
			wanted: nil,
		},
		"should not return error if fifo queue is enabled": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						FIFO: FIFOAdvanceConfigOrBool{
							Enable: aws.Bool(true),
						},
					},
				},
			},
			wanted: nil,
		},
		// FIFO advanced-config cases: throughput_limit and deduplication_scope
		// accept only fixed enum values.
		"should return error if invalid fifo throughput limit": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								FIFOThroughputLimit: aws.String("incorrectFIFOThoughoutLimit"),
							},
						},
					},
				},
			},
			wanted: errors.New(`validate "queue": validate "throughput_limit": fifo throughput limit value must be one of perMessageGroupId or perQueue`),
		},
		"should not return error if valid fifo throughput limit": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								FIFOThroughputLimit: aws.String(sqsFIFOThroughputLimitPerMessageGroupID),
							},
						},
					},
				},
			},
			wanted: nil,
		},
		"should return error if invalid deduplicate scope": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								DeduplicationScope: aws.String("incorrectDeduplicateScope"),
							},
						},
					},
				},
			},
			wanted: errors.New(`validate "queue": validate "deduplication_scope": deduplication scope value must be one of messageGroup or queue`),
		},
		"should not return error if valid deduplicate scope": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								DeduplicationScope: aws.String(sqsDeduplicationScopeMessageGroup),
							},
						},
					},
				},
			},
			wanted: nil,
		},
		// "high_throughput" is shorthand that conflicts with the two explicit knobs.
		"should return error if high_throughput is defined along with deduplication_scope": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								HighThroughputFifo: aws.Bool(true),
								DeduplicationScope: aws.String(sqsDeduplicationScopeMessageGroup),
							},
						},
					},
				},
			},
			wanted: errors.New(`validate "queue": must specify one, not both, of "high_throughput" and "deduplication_scope"`),
		},
		"should return error if high_throughput is defined along with throughput_limit": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								HighThroughputFifo: aws.Bool(true),
								FIFOThroughputLimit: aws.String(sqsFIFOThroughputLimitPerMessageGroupID),
							},
						},
					},
				},
			},
			wanted: errors.New(`validate "queue": must specify one, not both, of "high_throughput" and "throughput_limit"`),
		},
		"should return error if invalid combination of deduplication_scope and throughput_limit is defined": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						FIFO: FIFOAdvanceConfigOrBool{
							Advanced: FIFOAdvanceConfig{
								FIFOThroughputLimit: aws.String(sqsFIFOThroughputLimitPerMessageGroupID),
								DeduplicationScope: aws.String(sqsDeduplicationScopeQueue),
							},
						},
					},
				},
			},
			wanted: errors.New(`validate "queue": "throughput_limit" must be set to "perQueue" when "deduplication_scope" is set to "queue"`),
		},
		"should not return error if valid standard queue config defined": {
			in: TopicSubscription{
				Name: aws.String("mockTopic"),
				Service: aws.String("mockservice"),
				Queue: SQSQueueOrBool{
					Advanced: SQSQueue{
						Retention: &duration111Seconds,
						Delay: &duration111Seconds,
						Timeout: &duration111Seconds,
						DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
					},
				},
			},
			wanted: nil,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wanted != nil {
				require.EqualError(t, err, tc.wanted.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestOverrideRule_validate checks that protected CloudFormation paths cannot
// be targeted by taskdef override rules.
func TestOverrideRule_validate(t *testing.T) {
	testCases := map[string]struct {
		in     OverrideRule
		wanted error
	}{
		"should return an error if override rule is invalid": {
			in:     OverrideRule{Path: "ContainerDefinitions[1].Name"},
			wanted: errors.New(`"ContainerDefinitions\[\d+\].Name" cannot be overridden with a custom value`),
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wanted == nil {
				require.NoError(t, err)
				return
			}
			require.EqualError(t, err, tc.wanted.Error())
		})
	}
}
// TestValidateLoadBalancerTarget covers target container resolution: the
// named container must exist and must expose a port.
func TestValidateLoadBalancerTarget(t *testing.T) {
	testCases := map[string]struct {
		in     validateTargetContainerOpts
		wanted error
	}{
		"should return an error if target container doesn't exist": {
			in: validateTargetContainerOpts{
				mainContainerName: "mockMainContainer",
				targetContainer:   aws.String("foo"),
			},
			wanted: fmt.Errorf(`target container "foo" doesn't exist`),
		},
		"should return an error if target container doesn't expose a port": {
			in: validateTargetContainerOpts{
				mainContainerName: "mockMainContainer",
				targetContainer:   aws.String("foo"),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {},
				},
			},
			wanted: fmt.Errorf(`target container "foo" doesn't expose a port`),
		},
		"success with no target container set": {
			in: validateTargetContainerOpts{
				mainContainerName: "mockMainContainer",
				targetContainer:   nil,
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {},
				},
			},
		},
		"success": {
			in: validateTargetContainerOpts{
				mainContainerName: "mockMainContainer",
				targetContainer:   aws.String("foo"),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {Port: aws.String("80")},
				},
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := validateTargetContainer(tc.in)
			if tc.wanted == nil {
				require.NoError(t, err)
				return
			}
			require.EqualError(t, err, tc.wanted.Error())
		})
	}
}
// TestValidateLogging checks env-file extension validation for the logging sidecar.
func TestValidateLogging(t *testing.T) {
	testCases := map[string]struct {
		in          Logging
		wantedError error
	}{
		"should return an error if env file has wrong extension": {
			in: Logging{
				EnvFile: aws.String("path/to/envFile.sh"),
			},
			wantedError: fmt.Errorf("environment file path/to/envFile.sh must have a .env file extension"),
		},
		"success": {
			in: Logging{
				EnvFile: aws.String("test.env"),
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wantedError == nil {
				require.NoError(t, err)
				return
			}
			require.EqualError(t, err, tc.wantedError.Error())
		})
	}
}
// TestValidateContainerDeps covers the container dependency graph checks:
// essential containers may only be depended on with START or HEALTHY status,
// every referenced container must exist (including the implicit firelens log
// router, which exists only when logging is configured), self-dependencies
// are rejected, and the dependency graph must be acyclic.
func TestValidateContainerDeps(t *testing.T) {
	testCases := map[string]struct {
		in validateDependenciesOpts
		wanted error
	}{
		"should return an error if main container dependencies status is invalid": {
			in: validateDependenciesOpts{
				mainContainerName: "mockMainContainer",
				imageConfig: Image{
					DependsOn: DependsOn{
						"mockMainContainer": "complete",
					},
				},
			},
			wanted: fmt.Errorf("validate mockMainContainer container dependencies status: essential container mockMainContainer can only have status START or HEALTHY"),
		},
		"should return an error if sidecar container dependencies status is invalid": {
			in: validateDependenciesOpts{
				mainContainerName: "mockMainContainer",
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						DependsOn: DependsOn{
							"mockMainContainer": "success",
						},
					},
				},
			},
			wanted: fmt.Errorf("validate foo container dependencies status: essential container mockMainContainer can only have status START or HEALTHY"),
		},
		"should return an error if a main container dependency does not exist": {
			in: validateDependenciesOpts{
				mainContainerName: "mockMainContainer",
				imageConfig: Image{
					DependsOn: DependsOn{
						"foo": "healthy",
					},
				},
			},
			wanted: fmt.Errorf("container foo does not exist"),
		},
		// No logging config is set here, so the firelens container is absent.
		"should return an error if a firelens container does not exist": {
			in: validateDependenciesOpts{
				mainContainerName: "mockMainContainer",
				imageConfig: Image{
					DependsOn: DependsOn{
						"firelens_log_router": "start",
					},
				},
			},
			wanted: fmt.Errorf("container firelens_log_router does not exist"),
		},
		"should return an error if a sidecar container dependency does not exist": {
			in: validateDependenciesOpts{
				mainContainerName: "mockMainContainer",
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						DependsOn: DependsOn{
							"bar": "healthy",
						},
					},
				},
			},
			wanted: fmt.Errorf("container bar does not exist"),
		},
		"should return an error if container depends on itself": {
			in: validateDependenciesOpts{
				mainContainerName: "mockMainContainer",
				imageConfig: Image{
					DependsOn: DependsOn{
						"mockMainContainer": "healthy",
					},
				},
			},
			wanted: fmt.Errorf("container mockMainContainer cannot depend on itself"),
		},
		// alpha -> beta -> gamma -> alpha forms a cycle; zeta is outside it.
		"should return an error if container dependencies graph is cyclic": {
			in: validateDependenciesOpts{
				mainContainerName: "alpha",
				imageConfig: Image{
					DependsOn: DependsOn{
						"beta": "healthy",
					},
				},
				sidecarConfig: map[string]*SidecarConfig{
					"beta": {
						DependsOn: DependsOn{
							"gamma": "healthy",
						},
					},
					"gamma": {
						DependsOn: DependsOn{
							"alpha": "healthy",
						},
					},
					"zeta": {
						DependsOn: DependsOn{
							"alpha": "healthy",
						},
					},
				},
			},
			wanted: fmt.Errorf("circular container dependency chain includes the following containers: [alpha beta gamma]"),
		},
		// Logging is configured here, so "firelens_log_router" is a valid
		// dependency; "complete" is allowed on the non-essential sidecar.
		"success": {
			in: validateDependenciesOpts{
				mainContainerName: "alpha",
				imageConfig: Image{
					DependsOn: DependsOn{
						"firelens_log_router": "start",
						"beta": "complete",
					},
				},
				logging: Logging{
					Image: aws.String("foobar"),
				},
				sidecarConfig: map[string]*SidecarConfig{
					"beta": {
						Essential: aws.Bool(false),
						DependsOn: DependsOn{
							"firelens_log_router": "start",
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := validateContainerDeps(tc.in)
			if tc.wanted != nil {
				require.EqualError(t, err, tc.wanted.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestValidateWindows covers the restrictions that apply when deploying
// Windows containers: no EFS volumes and no read-only root filesystem.
func TestValidateWindows(t *testing.T) {
	testCases := map[string]struct {
		in          validateWindowsOpts
		wantedError error
	}{
		"error if efs specified": {
			in: validateWindowsOpts{
				efsVolumes: map[string]*Volume{
					"someVolume": {
						EFS: EFSConfigOrBool{
							Enabled: aws.Bool(true),
						},
						MountPointOpts: MountPointOpts{
							ContainerPath: aws.String("mockPath"),
						},
					},
				},
			},
			wantedError: errors.New(`'EFS' is not supported when deploying a Windows container`),
		},
		"should return nil when no fields are specified": {
			in: validateWindowsOpts{},
		},
		"error if readonlyfs is true": {
			in: validateWindowsOpts{
				readOnlyFS: aws.Bool(true),
			},
			wantedError: fmt.Errorf(`%q can not be set to 'true' when deploying a Windows container`, "readonly_fs"),
		},
		"should return nil if readonly_fs is false": {
			in: validateWindowsOpts{
				readOnlyFS: aws.Bool(false),
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := validateWindows(tc.in)
			if tc.wantedError == nil {
				require.NoError(t, err)
				return
			}
			require.EqualError(t, err, tc.wantedError.Error())
		})
	}
}
// TestValidateARM verifies that Fargate Spot capacity is rejected for
// ARM-based workloads, whether requested via "spot" or "spot_from".
func TestValidateARM(t *testing.T) {
	testCases := map[string]struct {
		in          validateARMOpts
		wantedError error
	}{
		"should return an error if Spot specified inline": {
			in: validateARMOpts{
				Spot: aws.Int(2),
			},
			wantedError: fmt.Errorf(`'Fargate Spot' is not supported when deploying on ARM architecture`),
		},
		"should return an error if Spot specified with spot_from": {
			in: validateARMOpts{
				SpotFrom: aws.Int(2),
			},
			wantedError: fmt.Errorf(`'Fargate Spot' is not supported when deploying on ARM architecture`),
		},
		"should return nil if Spot not specified": {
			in: validateARMOpts{
				Spot: nil,
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := validateARM(tc.in)
			if tc.wantedError == nil {
				require.NoError(t, err)
				return
			}
			require.EqualError(t, err, tc.wantedError.Error())
		})
	}
}
// TestDeploymentConfig_validate checks the allowed rolling deployment
// strategies ("default" and "recreate") and that empty or alarm-only
// configurations pass validation.
func TestDeploymentConfig_validate(t *testing.T) {
	testCases := map[string]struct {
		deployConfig DeploymentConfig
		wanted       string
	}{
		"error if deploy config has invalid rolling strategy": {
			deployConfig: DeploymentConfig{
				DeploymentControllerConfig: DeploymentControllerConfig{
					Rolling: aws.String("unknown"),
				},
			},
			wanted: `invalid rolling deployment strategy "unknown", must be one of default or recreate`,
		},
		"ok if deployment strategy is recreate": {
			deployConfig: DeploymentConfig{
				DeploymentControllerConfig: DeploymentControllerConfig{
					Rolling: aws.String("recreate"),
				},
			},
		},
		"ok if deployment strategy is default": {
			deployConfig: DeploymentConfig{
				DeploymentControllerConfig: DeploymentControllerConfig{
					Rolling: aws.String("default"),
				},
			},
		},
		"ok if deployment is empty": {
			deployConfig: DeploymentConfig{},
		},
		"ok if deployment strategy is empty but alarm indicated": {
			deployConfig: DeploymentConfig{
				RollbackAlarms: BasicToUnion[[]string, AlarmArgs]([]string{"alarmName"}),
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := tc.deployConfig.validate()
			if tc.wanted == "" {
				require.NoError(t, err)
				return
			}
			require.NotNil(t, err)
			require.Contains(t, err.Error(), tc.wanted)
		})
	}
}
// TestFromEnvironment_validate checks that an imported CloudFormation value
// must have a non-empty name.
func TestFromEnvironment_validate(t *testing.T) {
	testCases := map[string]struct {
		in          fromCFN
		wantedError error
	}{
		"error if name is an empty string": {
			in: fromCFN{
				Name: aws.String(""),
			},
			wantedError: errors.New("name cannot be an empty string"),
		},
		"ok": {
			in: fromCFN{
				Name: aws.String("db"),
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wantedError == nil {
				require.NoError(t, err)
				return
			}
			require.NotNil(t, err)
			require.EqualError(t, err, tc.wantedError.Error())
		})
	}
}
// TestValidateExposedPorts covers cross-container port collision detection:
// the main container, sidecars, ALB routing rules (main and additional), and
// NLB listeners (main and additional) may not expose the same port from
// different containers, while re-declaring the same container/port pairing is
// allowed.
func TestValidateExposedPorts(t *testing.T) {
	testCases := map[string]struct {
		in validateExposedPortsOpts
		wanted error
	}{
		"should return an error if main container and sidecar container is exposing the same port": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(80),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
			},
			wanted: fmt.Errorf(`containers "foo" and "mockMainContainer" are exposing the same port 80`),
		},
		// Without target_container, the ALB rule resolves to the container that
		// already owns the port, so no conflict is raised.
		"should not error out when alb target_port is same as that of sidecar container port but target_container is empty": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(8080),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(80),
					},
				},
			},
			wanted: nil,
		},
		"should return an error if alb target_port points to one sidecar container port and target_container points to another sidecar container": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(5000),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("8080"),
					},
					"nginx": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetContainer: aws.String("nginx"),
						TargetPort: aws.Uint16(8080),
					},
				},
			},
			wanted: fmt.Errorf(`containers "nginx" and "foo" are exposing the same port 8080`),
		},
		"should not return an error if main container and sidecar container is exposing different ports": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(8080),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
			},
			wanted: nil,
		},
		"doesn't error out when similar config is present in target_port and target_container as that of primary container": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(8080),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(8080),
						TargetContainer: aws.String("mockMainContainer"),
					},
				},
			},
			wanted: nil,
		},
		"doesn't error out when similar config is present in target_port and target_container as that of sidecar container": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(8080),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(80),
						TargetContainer: aws.String("foo"),
					},
				},
			},
			wanted: nil,
		},
		"doesn't error out when target_port exposing different port of the primary container than its main port": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(8080),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(8081),
					},
				},
			},
			wanted: nil,
		},
		"doesn't error out when multiple ports are open through additional_rules": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(8080),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(8081),
					},
					AdditionalRoutingRules: []RoutingRule{
						{
							TargetPort: aws.Uint16(8082),
						},
						{
							TargetPort: aws.Uint16(8083),
						},
					},
				},
			},
			wanted: nil,
		},
		// ALB and NLB may both point at the same container/port pairing.
		"should not return an error if alb and nlb target_port trying to expose same container port of the primary container": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(5000),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("8080"),
					},
					"nginx": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(5001),
					},
				},
				nlb: &NetworkLoadBalancerConfiguration{
					Listener: NetworkLoadBalancerListener{
						Port: aws.String("5001/tcp"),
						TargetPort: aws.Int(5001),
					},
				},
			},
		},
		"should not return an error if alb and nlb target_port trying to expose same container port sidecar container": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(5000),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("8080"),
					},
					"nginx": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(5001),
						TargetContainer: aws.String("foo"),
					},
				},
				nlb: &NetworkLoadBalancerConfiguration{
					Listener: NetworkLoadBalancerListener{
						Port: aws.String("5001/tcp"),
						TargetPort: aws.Int(5001),
						TargetContainer: aws.String("foo"),
					},
				},
			},
		},
		"should return an error if alb and nlb target_port trying to expose same container port of different containers": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(5000),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("8080"),
					},
					"nginx": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(5001),
						TargetContainer: aws.String("foo"),
					},
				},
				nlb: &NetworkLoadBalancerConfiguration{
					Listener: NetworkLoadBalancerListener{
						Port: aws.String("5001/tcp"),
						TargetPort: aws.Int(5001),
						TargetContainer: aws.String("nginx"),
					},
				},
			},
			wanted: fmt.Errorf(`validate "nlb": containers "nginx" and "foo" are exposing the same port 5001`),
		},
		"should not return an error if nlb is trying to expose multiple ports": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(5000),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("8080"),
					},
					"nginx": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(5001),
						TargetContainer: aws.String("foo"),
					},
				},
				nlb: &NetworkLoadBalancerConfiguration{
					Listener: NetworkLoadBalancerListener{
						Port: aws.String("5001/tcp"),
						TargetPort: aws.Int(5001),
						TargetContainer: aws.String("foo"),
					},
					AdditionalListeners: []NetworkLoadBalancerListener{
						{
							Port: aws.String("5002/tcp"),
							TargetPort: aws.Int(5002),
							TargetContainer: aws.String("foo"),
						},
					},
				},
			},
		},
		"should return an error if nlb is trying to expose same port as from different containers using additional listeners": {
			in: validateExposedPortsOpts{
				mainContainerName: "mockMainContainer",
				mainContainerPort: aws.Uint16(5000),
				sidecarConfig: map[string]*SidecarConfig{
					"foo": {
						Port: aws.String("8080"),
					},
					"nginx": {
						Port: aws.String("80"),
					},
				},
				alb: &HTTP{
					Main: RoutingRule{
						TargetPort: aws.Uint16(5001),
						TargetContainer: aws.String("foo"),
					},
				},
				nlb: &NetworkLoadBalancerConfiguration{
					Listener: NetworkLoadBalancerListener{
						Port: aws.String("5001/tcp"),
						TargetPort: aws.Int(5001),
						TargetContainer: aws.String("foo"),
					},
					AdditionalListeners: []NetworkLoadBalancerListener{
						{
							Port: aws.String("5002/tcp"),
							TargetPort: aws.Int(5001),
							TargetContainer: aws.String("nginx"),
						},
					},
				},
			},
			wanted: fmt.Errorf(`validate "nlb.additional_listeners[0]": containers "nginx" and "foo" are exposing the same port 5001`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := validateExposedPorts(tc.in)
			if tc.wanted != nil {
				require.EqualError(t, err, tc.wanted.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestImageLocationOrBuild_validate checks that "build" and "location" are
// mutually exclusive and that either one alone is accepted.
func TestImageLocationOrBuild_validate(t *testing.T) {
	testCases := map[string]struct {
		in          ImageLocationOrBuild
		wantedError error
	}{
		"should return error if both build and location are specified": {
			in: ImageLocationOrBuild{
				Build:    BuildArgsOrString{BuildString: aws.String("web/Dockerfile")},
				Location: aws.String("mockLocation"),
			},
			wantedError: fmt.Errorf(`must specify one of "build" and "location"`),
		},
		"return nil if only build is specified": {
			in: ImageLocationOrBuild{
				Build: BuildArgsOrString{BuildString: aws.String("web/Dockerfile")},
			},
		},
		"return nil if only location is specified": {
			in: ImageLocationOrBuild{
				Location: aws.String("mockLocation"),
			},
		},
	}
	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			err := tc.in.validate()
			if tc.wantedError == nil {
				require.NoError(t, err)
				return
			}
			require.EqualError(t, err, tc.wantedError.Error())
		})
	}
}
| 4,257 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
"gopkg.in/yaml.v3"
)
const (
	// workerSvcManifestPath is the template path used to render a worker service manifest.
	workerSvcManifestPath = "workloads/services/worker/manifest.yml"
)
var (
	// errUnmarshalQueueOpts is returned when the "queue" field cannot be decoded as either a bool or a map.
	errUnmarshalQueueOpts = errors.New(`cannot unmarshal "queue" field into bool or map`)
	// errUnmarshalFifoConfig is returned when the "fifo" field cannot be decoded as either a bool or a map.
	errUnmarshalFifoConfig = errors.New(`unable to unmarshal "fifo" field into boolean or compose-style map`)
)
// WorkerService holds the configuration to create a worker service.
type WorkerService struct {
	// Workload carries the service name and type; WorkerServiceConfig holds the
	// default configuration applied across all environments.
	Workload `yaml:",inline"`
	WorkerServiceConfig `yaml:",inline"`
	// Use *WorkerServiceConfig because of https://github.com/imdario/mergo/issues/146
	Environments map[string]*WorkerServiceConfig `yaml:",flow"`
	// parser renders the manifest template in MarshalBinary.
	parser template.Parser
}
// Publish returns the list of topics where notifications can be published.
func (s *WorkerService) Publish() []Topic {
	topics := s.WorkerServiceConfig.PublishConfig.publishedTopics()
	return topics
}
// subnets returns a pointer to the service's subnet placement arguments so
// callers can read or mutate the subnet configuration in place.
func (s *WorkerService) subnets() *SubnetListOrArgs {
	return &s.Network.VPC.Placement.Subnets
}
// WorkerServiceConfig holds the configuration that can be overridden per environments.
type WorkerServiceConfig struct {
	ImageConfig ImageWithHealthcheck `yaml:"image,flow"`
	ImageOverride `yaml:",inline"`
	TaskConfig `yaml:",inline"`
	Logging Logging `yaml:"logging,flow"`
	Sidecars map[string]*SidecarConfig `yaml:"sidecars"` // NOTE: keep the pointers because `mergo` doesn't automatically deep merge map's value unless it's a pointer type.
	// Subscribe configures the SNS topic subscriptions and events queue;
	// PublishConfig lists the topics this service publishes to.
	Subscribe SubscribeConfig `yaml:"subscribe"`
	PublishConfig PublishConfig `yaml:"publish"`
	Network NetworkConfig `yaml:"network"`
	TaskDefOverrides []OverrideRule `yaml:"taskdef_overrides"`
	DeployConfig WorkerDeploymentConfig `yaml:"deployment"`
	Observability Observability `yaml:"observability"`
}
// SubscribeConfig represents the configurable options for setting up subscriptions.
type SubscribeConfig struct {
	// Topics lists the SNS topic subscriptions; Queue configures the shared events queue.
	Topics []TopicSubscription `yaml:"topics"`
	Queue SQSQueue `yaml:"queue"`
}
// IsEmpty reports whether no subscription settings were provided.
func (s *SubscribeConfig) IsEmpty() bool {
	if s.Topics != nil {
		return false
	}
	return s.Queue.IsEmpty()
}
// TopicSubscription represents the configurable options for setting up a SNS Topic Subscription.
type TopicSubscription struct {
	// Name and Service together identify the topic to subscribe to.
	Name *string `yaml:"name"`
	Service *string `yaml:"service"`
	// FilterPolicy is passed through as the SNS subscription filter policy.
	FilterPolicy map[string]interface{} `yaml:"filter_policy"`
	// Queue optionally gives this subscription a dedicated queue instead of the shared events queue.
	Queue SQSQueueOrBool `yaml:"queue"`
}
// SQSQueueOrBool is a custom type which supports unmarshaling yaml which
// can either be of type bool or type SQSQueue.
type SQSQueueOrBool struct {
	// Exactly one of the two fields is set after unmarshaling.
	Advanced SQSQueue
	Enabled *bool
}
// IsEmpty reports whether neither the boolean form nor the advanced form was set.
func (q *SQSQueueOrBool) IsEmpty() bool {
	if q.Enabled != nil {
		return false
	}
	return q.Advanced.IsEmpty()
}
// UnmarshalYAML implements the yaml(v3) interface. It allows SQSQueueOrBool to be specified as a
// bool or a struct alternately: it first tries decoding into the advanced SQSQueue form and falls
// back to a plain boolean, returning errUnmarshalQueueOpts when neither decodes.
func (q *SQSQueueOrBool) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&q.Advanced); err != nil {
		// A *yaml.TypeError just means the node isn't a map; any other error is fatal.
		// Use errors.As (instead of a type switch) so wrapped type errors are also
		// recognized, consistent with FIFOAdvanceConfigOrBool.UnmarshalYAML.
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !q.Advanced.IsEmpty() {
		// Unmarshaled successfully to q.Advanced, unset q.Enabled, and return.
		q.Enabled = nil
		return nil
	}
	if err := value.Decode(&q.Enabled); err != nil {
		return errUnmarshalQueueOpts
	}
	return nil
}
// SQSQueue represents the configurable options for setting up a SQS Queue.
type SQSQueue struct {
	// Retention is the message retention period; Delay the delivery delay;
	// Timeout the visibility timeout.
	Retention *time.Duration `yaml:"retention"`
	Delay *time.Duration `yaml:"delay"`
	Timeout *time.Duration `yaml:"timeout"`
	DeadLetter DeadLetterQueue `yaml:"dead_letter"`
	FIFO FIFOAdvanceConfigOrBool `yaml:"fifo"`
}
// FIFOAdvanceConfigOrBool represents the configurable options for fifo queues.
type FIFOAdvanceConfigOrBool struct {
	// Enable is the boolean shorthand; Advanced carries the full configuration.
	Enable *bool
	Advanced FIFOAdvanceConfig
}
// IsEmpty reports whether neither the boolean shorthand nor the advanced
// configuration was provided.
func (f *FIFOAdvanceConfigOrBool) IsEmpty() bool {
	if f.Enable != nil {
		return false
	}
	return f.Advanced.IsEmpty()
}
// IsEnabled returns true if FIFO is turned on, either via the boolean
// shorthand or by specifying any advanced FIFO configuration.
func (f *FIFOAdvanceConfigOrBool) IsEnabled() bool {
	return aws.BoolValue(f.Enable) || !f.Advanced.IsEmpty()
}
// FIFOAdvanceConfig represents the advanced fifo queue config.
type FIFOAdvanceConfig struct {
	ContentBasedDeduplication *bool `yaml:"content_based_deduplication"`
	DeduplicationScope *string `yaml:"deduplication_scope"`
	FIFOThroughputLimit *string `yaml:"throughput_limit"`
	// HighThroughputFifo is a shorthand that is mutually exclusive with
	// DeduplicationScope and FIFOThroughputLimit.
	HighThroughputFifo *bool `yaml:"high_throughput"`
}
// IsEmpty reports whether none of the advanced FIFO fields were provided.
func (f *FIFOAdvanceConfig) IsEmpty() bool {
	switch {
	case f.FIFOThroughputLimit != nil:
		return false
	case f.HighThroughputFifo != nil:
		return false
	case f.DeduplicationScope != nil:
		return false
	case f.ContentBasedDeduplication != nil:
		return false
	}
	return true
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the FIFOAdvanceConfigOrBool
// struct, allowing the "fifo" field to be specified as either a boolean or an advanced
// configuration map. It returns errUnmarshalFifoConfig when neither form decodes.
// This method implements the yaml.Unmarshaler (v3) interface.
// NOTE: the receiver is named f for consistency with the type's other methods.
func (f *FIFOAdvanceConfigOrBool) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&f.Advanced); err != nil {
		// A *yaml.TypeError just means the node isn't a map; any other error is fatal.
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !f.Advanced.IsEmpty() {
		return nil
	}
	if err := value.Decode(&f.Enable); err != nil {
		return errUnmarshalFifoConfig
	}
	return nil
}
// IsEmpty reports whether no queue settings were provided.
func (q *SQSQueue) IsEmpty() bool {
	if q.Retention != nil || q.Delay != nil || q.Timeout != nil {
		return false
	}
	return q.DeadLetter.IsEmpty() && q.FIFO.IsEmpty()
}
// DeadLetterQueue represents the configurable options for setting up a Dead-Letter Queue.
type DeadLetterQueue struct {
	// Tries is the number of delivery attempts before a message is moved to the DLQ.
	Tries *uint16 `yaml:"tries"`
}
// IsEmpty reports whether no dead-letter settings were provided.
func (q *DeadLetterQueue) IsEmpty() bool {
	empty := q.Tries == nil
	return empty
}
// WorkerServiceProps represents the configuration needed to create a worker service.
type WorkerServiceProps struct {
	WorkloadProps
	HealthCheck ContainerHealthCheck // Optional healthcheck configuration.
	Platform PlatformArgsOrString // Optional platform configuration.
	Topics []TopicSubscription // Optional topics for subscriptions
	Queue SQSQueue // Optional queue configuration.
}
// NewWorkerService applies the props to a default Worker service configuration with
// minimal cpu/memory thresholds, single replica, no healthcheck, and then returns it.
func NewWorkerService(props WorkerServiceProps) *WorkerService {
	svc := newDefaultWorkerService()
	// Apply overrides.
	svc.Name = stringP(props.Name)
	svc.WorkerServiceConfig.ImageConfig.Image.Location = stringP(props.Image)
	svc.WorkerServiceConfig.ImageConfig.Image.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
	svc.WorkerServiceConfig.ImageConfig.HealthCheck = props.HealthCheck
	svc.WorkerServiceConfig.Platform = props.Platform
	if isWindowsPlatform(props.Platform) {
		// Windows containers require larger minimum task resources.
		svc.WorkerServiceConfig.TaskConfig.CPU = aws.Int(MinWindowsTaskCPU)
		svc.WorkerServiceConfig.TaskConfig.Memory = aws.Int(MinWindowsTaskMemory)
	}
	if len(props.Topics) > 0 {
		// Normalize FIFO topic names and per-topic queue defaults before storing
		// the subscriptions. Mutates props.Topics in place.
		setSubscriptionQueueDefaults(props.Topics, &svc.WorkerServiceConfig.Subscribe.Queue)
	}
	svc.WorkerServiceConfig.Subscribe.Topics = props.Topics
	// NOTE: the original code assigned svc.WorkerServiceConfig.Platform a second
	// time here; the redundant duplicate assignment has been removed.
	for _, envName := range props.PrivateOnlyEnvironments {
		// Force private subnet placement for environments without public subnets.
		svc.Environments[envName] = &WorkerServiceConfig{
			Network: NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: placementStringP(PrivateSubnetPlacement),
					},
				},
			},
		}
	}
	svc.parser = template.New()
	return svc
}
// setSubscriptionQueueDefaults normalizes subscriptions when any FIFO topic is present:
// it strips the ".fifo" suffix from FIFO topic names, switches the shared events queue
// to FIFO, and gives each standard topic its own topic-specific queue. When every
// subscription targets a standard topic, the input is left untouched.
func setSubscriptionQueueDefaults(topics []TopicSubscription, eventsQueue *SQSQueue) {
	hasFIFO := false
	for _, topic := range topics {
		if isFIFO(aws.StringValue(topic.Name)) {
			hasFIFO = true
			break
		}
	}
	if !hasFIFO {
		return
	}
	eventsQueue.FIFO.Enable = aws.Bool(true)
	for i := range topics {
		name := aws.StringValue(topics[i].Name)
		if !isFIFO(name) {
			// Standard topics get their own queue since the shared one is now FIFO.
			topics[i].Queue.Enabled = aws.Bool(true)
			continue
		}
		topics[i].Name = aws.String(strings.TrimSuffix(name, ".fifo"))
	}
}
// isFIFO reports whether the given SNS topic name denotes a FIFO topic,
// i.e. whether it carries the ".fifo" suffix.
func isFIFO(topic string) bool {
	const fifoSuffix = ".fifo"
	return strings.HasSuffix(topic, fifoSuffix)
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (s *WorkerService) MarshalBinary() ([]byte, error) {
	// Template helper functions available while rendering the manifest.
	funcs := template.WithFuncs(map[string]interface{}{
		"fmtSlice":   template.FmtSliceFunc,
		"quoteSlice": template.QuoteSliceFunc,
	})
	content, err := s.parser.Parse(workerSvcManifestPath, *s, funcs)
	if err != nil {
		return nil, err
	}
	return content.Bytes(), nil
}
// BuildArgs returns a docker.BuildArguments object for the service given a context directory.
func (s *WorkerService) BuildArgs(contextDir string) (map[string]*DockerBuildArgs, error) {
	needsBuild, err := requiresBuild(s.ImageConfig.Image)
	if err != nil {
		return nil, err
	}
	// Map holding the build arguments of the main container image plus all sidecar images.
	args := make(map[string]*DockerBuildArgs, len(s.Sidecars)+1)
	if needsBuild {
		args[aws.StringValue(s.Name)] = s.ImageConfig.Image.BuildConfig(contextDir)
	}
	return buildArgs(contextDir, args, s.Sidecars)
}
// EnvFiles returns the locations of all env files against the ws root directory.
// This method returns a map[string]string where the keys are container names
// and the values are either env file paths or empty strings.
// It delegates to the shared envFiles helper with the main container's task
// config, logging sidecar, and regular sidecars.
func (s *WorkerService) EnvFiles() map[string]string {
	return envFiles(s.Name, s.TaskConfig, s.Logging, s.Sidecars)
}
// Subscriptions returns a list of TopicSubscription objects which represent the SNS topics the service
// receives messages from. This method also appends ".fifo" to the topics and returns a new set of subs.
func (s *WorkerService) Subscriptions() []TopicSubscription {
	var subs []TopicSubscription
	for _, topic := range s.Subscribe.Topics {
		sub := topic
		// A topic targets a FIFO queue in either of two cases:
		// it relies on the default events queue and that queue is FIFO, or
		// it declares its own topic-specific FIFO queue configuration.
		// In both cases the ".fifo" suffix is restored on the topic name.
		usesDefaultFIFO := topic.Queue.IsEmpty() && !s.Subscribe.Queue.IsEmpty() && s.Subscribe.Queue.FIFO.IsEnabled()
		usesTopicFIFO := !topic.Queue.IsEmpty() && !topic.Queue.Advanced.IsEmpty() && topic.Queue.Advanced.FIFO.IsEnabled()
		if usesDefaultFIFO || usesTopicFIFO {
			sub.Name = aws.String(aws.StringValue(topic.Name) + ".fifo")
		}
		subs = append(subs, sub)
	}
	return subs
}
// applyEnv returns a copy of the manifest with the overrides for envName
// merged on top of the base configuration. The receiver is taken by value so
// the caller's manifest is never mutated.
func (s WorkerService) applyEnv(envName string) (workloadManifest, error) {
	overrideConfig, ok := s.Environments[envName]
	if !ok {
		return &s, nil
	}
	if overrideConfig == nil {
		return &s, nil
	}
	// Apply overrides to the original service s.
	// Each transformer customizes how a particular field type is merged
	// (e.g. replace-vs-merge semantics); the merges are applied in order.
	for _, t := range defaultTransformers {
		err := mergo.Merge(&s, WorkerService{
			WorkerServiceConfig: *overrideConfig,
		}, mergo.WithOverride, mergo.WithTransformers(t))
		if err != nil {
			return nil, err
		}
	}
	// Drop the per-environment section: the result is fully resolved for envName.
	s.Environments = nil
	return &s, nil
}
// requiredEnvironmentFeatures returns the environment features required by the
// service's network and storage configuration.
func (s *WorkerService) requiredEnvironmentFeatures() []string {
	var features []string
	for _, required := range [][]string{
		s.Network.requiredEnvFeatures(),
		s.Storage.requiredEnvFeatures(),
	} {
		features = append(features, required...)
	}
	return features
}
// newDefaultWorkerService returns a Worker service with minimal task sizes and a single replica.
// Defaults: 256 CPU units / 512 MiB memory, exec disabled, and placement in
// public subnets. NewWorkerService layers the caller's props on top of this.
func newDefaultWorkerService() *WorkerService {
	return &WorkerService{
		Workload: Workload{
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{},
			Subscribe:   SubscribeConfig{},
			TaskConfig: TaskConfig{
				CPU:    aws.Int(256),
				Memory: aws.Int(512),
				Count: Count{
					Value: aws.Int(1),
					AdvancedCount: AdvancedCount{ // Leave advanced count empty while passing down the type of the workload.
						workloadType: manifestinfo.WorkerServiceType,
					},
				},
				ExecuteCommand: ExecuteCommand{
					Enable: aws.Bool(false),
				},
			},
			Network: NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: placementStringP(PublicSubnetPlacement),
					},
				},
			},
		},
		// Initialized so NewWorkerService can add per-environment overrides directly.
		Environments: map[string]*WorkerServiceConfig{},
	}
}
// ExposedPorts returns all the ports that are sidecar container ports available to receive traffic.
func (ws *WorkerService) ExposedPorts() (ExposedPortsIndex, error) {
	// A worker service's main container receives no traffic; only sidecars expose ports.
	var ports []ExposedPort
	for name, sidecar := range ws.Sidecars {
		sidecarPorts, err := sidecar.exposedPorts(name)
		if err != nil {
			return ExposedPortsIndex{}, err
		}
		ports = append(ports, sidecarPorts...)
	}
	portsForContainer, containerForPort := prepareParsedExposedPortsMap(sortExposedPorts(ports))
	return ExposedPortsIndex{
		PortsForContainer: portsForContainer,
		ContainerForPort:  containerForPort,
	}, nil
}
| 393 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// newMockSQSQueueOrBool returns an SQSQueueOrBool wrapping the standard mock queue.
func newMockSQSQueueOrBool() SQSQueueOrBool {
	return SQSQueueOrBool{Advanced: newMockSQSQueue()}
}

// newMockSQSQueue returns an SQSQueue fixture with every duration field set to
// 111 seconds and a dead-letter queue of 10 tries.
func newMockSQSQueue() SQSQueue {
	d := 111 * time.Second
	return SQSQueue{
		Retention:  &d,
		Delay:      &d,
		Timeout:    &d,
		DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
	}
}

// newMockSQSFIFOQueueOrBool returns an SQSQueueOrBool wrapping the FIFO mock queue.
func newMockSQSFIFOQueueOrBool() SQSQueueOrBool {
	return SQSQueueOrBool{Advanced: newMockSQSFIFOQueue()}
}

// newMockSQSFIFOQueue returns the standard mock queue extended with FIFO
// advanced configuration using a per-message-ID throughput limit.
func newMockSQSFIFOQueue() SQSQueue {
	q := newMockSQSQueue()
	q.FIFO = FIFOAdvanceConfigOrBool{
		Advanced: FIFOAdvanceConfig{
			FIFOThroughputLimit: aws.String("perMessageID"),
		},
	}
	return q
}
// TestNewWorkerSvc verifies that NewWorkerService layers WorkerServiceProps on
// top of the default worker-service manifest. The cases cover: the plain
// defaults, private-only environment placement overrides, FIFO topic
// normalization (the ".fifo" suffix is stripped and the default events queue
// becomes FIFO), and mixed standard/FIFO subscriptions where standard topics
// get topic-specific queues. Expected and actual manifests are compared via
// their YAML encoding so unexported fields (e.g. the template parser) do not
// participate in equality.
func TestNewWorkerSvc(t *testing.T) {
	testCases := map[string]struct {
		inProps        WorkerServiceProps
		wantedManifest *WorkerService
	}{
		"should return a worker service instance": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name:       "testers",
					Dockerfile: "./testers/Dockerfile",
					PrivateOnlyEnvironments: []string{
						"metrics",
					},
				},
			},
			wantedManifest: &WorkerService{
				Workload: Workload{
					Name: aws.String("testers"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./testers/Dockerfile"),
									},
								},
							},
						},
					},
					Subscribe: SubscribeConfig{},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
				// The private-only environment gets a private subnet placement override.
				Environments: map[string]*WorkerServiceConfig{
					"metrics": {
						Network: NetworkConfig{
							VPC: vpcConfig{
								Placement: PlacementArgOrString{
									PlacementString: placementStringP(PrivateSubnetPlacement),
								},
							},
						},
					},
				},
			},
		},
		"should return a worker service instance with subscribe": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name:       "testers",
					Dockerfile: "./testers/Dockerfile",
				},
			},
			wantedManifest: &WorkerService{
				Workload: Workload{
					Name: aws.String("testers"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./testers/Dockerfile"),
									},
								},
							},
						},
					},
					Subscribe: SubscribeConfig{},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
			},
		},
		"should return a worker service instance with 2 subscriptions to the default fifo queue and 2 standard topic specific queues": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name:       "testers",
					Dockerfile: "./testers/Dockerfile",
				},
				Topics: []TopicSubscription{
					{
						Name:    aws.String("fifoTopic1.fifo"),
						Service: aws.String("fifoService1"),
					},
					{
						Name:    aws.String("fifoTopic2.fifo"),
						Service: aws.String("fifoService2"),
					},
					{
						Name:    aws.String("standardTopic1"),
						Service: aws.String("standardService1"),
					},
					{
						Name:    aws.String("standardTopic2"),
						Service: aws.String("standardService2"),
					},
				},
			},
			// FIFO topics lose the ".fifo" suffix, the default queue becomes FIFO,
			// and standard topics get their own topic-specific queues.
			wantedManifest: &WorkerService{
				Workload: Workload{
					Name: aws.String("testers"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./testers/Dockerfile"),
									},
								},
							},
						},
					},
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("fifoTopic1"),
								Service: aws.String("fifoService1"),
							},
							{
								Name:    aws.String("fifoTopic2"),
								Service: aws.String("fifoService2"),
							},
							{
								Name:    aws.String("standardTopic1"),
								Service: aws.String("standardService1"),
								Queue:   SQSQueueOrBool{Enabled: aws.Bool(true)},
							},
							{
								Name:    aws.String("standardTopic2"),
								Service: aws.String("standardService2"),
								Queue:   SQSQueueOrBool{Enabled: aws.Bool(true)},
							},
						},
						Queue: SQSQueue{
							FIFO: FIFOAdvanceConfigOrBool{Enable: aws.Bool(true)},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
			},
		},
		"should return a worker service instance with 2 subscriptions to the default fifo queue": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name:       "testers",
					Dockerfile: "./testers/Dockerfile",
				},
				Topics: []TopicSubscription{
					{
						Name:    aws.String("fifoTopic1.fifo"),
						Service: aws.String("fifoService1"),
					},
					{
						Name:    aws.String("fifoTopic2.fifo"),
						Service: aws.String("fifoService2"),
					},
				},
			},
			wantedManifest: &WorkerService{
				Workload: Workload{
					Name: aws.String("testers"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./testers/Dockerfile"),
									},
								},
							},
						},
					},
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("fifoTopic1"),
								Service: aws.String("fifoService1"),
							},
							{
								Name:    aws.String("fifoTopic2"),
								Service: aws.String("fifoService2"),
							},
						},
						Queue: SQSQueue{
							FIFO: FIFOAdvanceConfigOrBool{Enable: aws.Bool(true)},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
			},
		},
		"should return a worker service instance with 2 subscriptions to the default standard queue": {
			inProps: WorkerServiceProps{
				WorkloadProps: WorkloadProps{
					Name:       "testers",
					Dockerfile: "./testers/Dockerfile",
				},
				Topics: []TopicSubscription{
					{
						Name:    aws.String("standardTopic1"),
						Service: aws.String("standardService1"),
					},
					{
						Name:    aws.String("standardTopic2"),
						Service: aws.String("standardService2"),
					},
				},
			},
			// Only standard topics: names and the default queue are left untouched.
			wantedManifest: &WorkerService{
				Workload: Workload{
					Name: aws.String("testers"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildArgs: DockerBuildArgs{
										Dockerfile: aws.String("./testers/Dockerfile"),
									},
								},
							},
						},
					},
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("standardTopic1"),
								Service: aws.String("standardService1"),
							},
							{
								Name:    aws.String("standardTopic2"),
								Service: aws.String("standardService2"),
							},
						},
						Queue: SQSQueue{},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(256),
						Memory: aws.Int(512),
						Count: Count{
							Value: aws.Int(1),
						},
						ExecuteCommand: ExecuteCommand{
							Enable: aws.Bool(false),
						},
					},
					Network: NetworkConfig{
						VPC: vpcConfig{
							Placement: PlacementArgOrString{
								PlacementString: placementStringP(PublicSubnetPlacement),
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			wantedBytes, err := yaml.Marshal(tc.wantedManifest)
			require.NoError(t, err)
			// WHEN
			actualBytes, err := yaml.Marshal(NewWorkerService(tc.inProps))
			require.NoError(t, err)
			require.Equal(t, string(wantedBytes), string(actualBytes))
		})
	}
}
// TestWorkerSvc_ApplyEnv verifies the merge semantics of applyEnv: how the
// per-environment WorkerServiceConfig overrides the base manifest for image,
// task config, sidecars, logging, and subscribe (topics and queue), including
// the nil/empty asymmetries of each override. Each case also asserts that the
// original manifest is left unmodified (applyEnv works on a copy).
func TestWorkerSvc_ApplyEnv(t *testing.T) {
	perc := Percentage(70)
	mockConfig := ScalingConfigOrT[Percentage]{
		Value: &perc,
	}
	// Fixtures: base manifests paired with various environment override shapes.
	mockWorkerServiceWithNoEnvironments := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{
					ImageLocationOrBuild: ImageLocationOrBuild{
						Build: BuildArgsOrString{
							BuildArgs: DockerBuildArgs{
								Dockerfile: aws.String("./Dockerfile"),
							},
						},
					},
				},
				HealthCheck: ContainerHealthCheck{
					Command:     []string{"hello", "world"},
					Interval:    durationp(1 * time.Second),
					Retries:     aws.Int(100),
					Timeout:     durationp(100 * time.Minute),
					StartPeriod: durationp(5 * time.Second),
				},
			},
			TaskConfig: TaskConfig{
				CPU:    aws.Int(256),
				Memory: aws.Int(256),
				Count: Count{
					Value: aws.Int(1),
				},
			},
		},
	}
	mockWorkerServiceWithNilEnvironment := WorkerService{
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test": nil,
		},
	}
	mockWorkerServiceWithMinimalOverride := WorkerService{
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test": {
				ImageConfig: ImageWithHealthcheck{
					Image: Image{},
				},
			},
		},
	}
	mockWorkerServiceWithAllOverride := WorkerService{
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{
					DockerLabels: map[string]string{
						"com.amazonaws.ecs.copilot.description": "Hello world!",
					},
				},
			},
			TaskConfig: TaskConfig{
				CPU:    aws.Int(256),
				Memory: aws.Int(256),
				Count: Count{
					Value: aws.Int(1),
				},
			},
			Sidecars: map[string]*SidecarConfig{
				"xray": {
					Port: aws.String("2000/udp"),
					Image: Union[*string, ImageLocationOrBuild]{
						Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
					},
				},
			},
			Logging: Logging{
				Destination: map[string]string{
					"Name":            "datadog",
					"exclude-pattern": "*",
				},
			},
			Subscribe: SubscribeConfig{
				Topics: []TopicSubscription{
					{
						Name:    aws.String("topicName"),
						Service: aws.String("bestService"),
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test": {
				ImageConfig: ImageWithHealthcheck{
					Image: Image{
						DockerLabels: map[string]string{
							"com.amazonaws.ecs.copilot.description": "Overridden!",
						},
					},
				},
				TaskConfig: TaskConfig{
					Count: Count{
						AdvancedCount: AdvancedCount{
							CPU: mockConfig,
						},
					},
					CPU: aws.Int(512),
					Variables: map[string]Variable{
						"LOG_LEVEL": {
							stringOrFromCFN{
								Plain: stringP(""),
							},
						},
					},
				},
				Sidecars: map[string]*SidecarConfig{
					"xray": {
						CredsParam: aws.String("some arn"),
					},
				},
				Logging: Logging{
					Destination: map[string]string{
						"include-pattern": "*",
						"exclude-pattern": "fe/",
					},
				},
				Subscribe: SubscribeConfig{
					Topics: []TopicSubscription{
						{
							Name:    aws.String("topicName2"),
							Service: aws.String("bestService2"),
						},
					},
				},
			},
		},
	}
	// Image override fixtures: build-vs-location combinations across base and env.
	mockWorkerServiceWithImageOverrideBuildByLocation := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{
					ImageLocationOrBuild: ImageLocationOrBuild{
						Build: BuildArgsOrString{
							BuildArgs: DockerBuildArgs{
								Dockerfile: aws.String("./Dockerfile"),
							},
						},
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"prod-iad": {
				ImageConfig: ImageWithHealthcheck{
					Image: Image{
						ImageLocationOrBuild: ImageLocationOrBuild{
							Location: aws.String("env-override location"),
						},
					},
				},
			},
		},
	}
	mockWorkerServiceWithImageOverrideLocationByLocation := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{
					ImageLocationOrBuild: ImageLocationOrBuild{
						Location: aws.String("original location"),
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"prod-iad": {
				ImageConfig: ImageWithHealthcheck{
					Image: Image{
						ImageLocationOrBuild: ImageLocationOrBuild{
							Location: aws.String("env-override location"),
						},
					},
				},
			},
		},
	}
	mockWorkerServiceWithImageOverrideBuildByBuild := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{
					ImageLocationOrBuild: ImageLocationOrBuild{
						Build: BuildArgsOrString{
							BuildArgs: DockerBuildArgs{
								Dockerfile: aws.String("original dockerfile"),
								Context:    aws.String("original context"),
							},
						},
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"prod-iad": {
				ImageConfig: ImageWithHealthcheck{
					Image: Image{
						ImageLocationOrBuild: ImageLocationOrBuild{
							Build: BuildArgsOrString{
								BuildString: aws.String("env overridden dockerfile"),
							},
						},
					},
				},
			},
		},
	}
	mockWorkerServiceWithImageOverrideLocationByBuild := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			ImageConfig: ImageWithHealthcheck{
				Image: Image{
					ImageLocationOrBuild: ImageLocationOrBuild{
						Location: aws.String("original location"),
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"prod-iad": {
				ImageConfig: ImageWithHealthcheck{
					Image: Image{
						ImageLocationOrBuild: ImageLocationOrBuild{
							Build: BuildArgsOrString{
								BuildString: aws.String("env overridden dockerfile"),
							},
						},
					},
				},
			},
		},
	}
	// Subscribe override fixtures: nil/empty/full combinations for the whole
	// subscribe block, its topics list, and its events queue.
	mockWorkerServiceWithSubscribeNilOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Topics: []TopicSubscription{
					{
						Name:    aws.String("name"),
						Service: aws.String("svc"),
						Queue:   newMockSQSQueueOrBool(),
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{},
			},
		},
	}
	mockWorkerServiceWithNilSubscribeOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Topics: []TopicSubscription{
						{
							Name:    aws.String("name"),
							Service: aws.String("svc"),
							Queue:   newMockSQSQueueOrBool(),
						},
					},
				},
			},
		},
	}
	mockWorkerServiceWithEmptySubscribeOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Topics: []TopicSubscription{
					{
						Name:    aws.String("name"),
						Service: aws.String("svc"),
						Queue:   newMockSQSQueueOrBool(),
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{},
			},
		},
	}
	mockWorkerServiceWithSubscribeTopicNilOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Topics: nil,
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Topics: []TopicSubscription{
						{
							Name:    aws.String("name"),
							Service: aws.String("svc"),
							Queue:   newMockSQSQueueOrBool(),
						},
					},
				},
			},
		},
	}
	mockWorkerServiceWithNilSubscribeTopicOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Topics: []TopicSubscription{
					{
						Name:    aws.String("name"),
						Service: aws.String("svc"),
						Queue:   newMockSQSQueueOrBool(),
					},
				},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Topics: nil,
				},
			},
		},
	}
	mockWorkerServiceWithSubscribeTopicEmptyOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Topics: []TopicSubscription{},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Topics: []TopicSubscription{
						{
							Name:    aws.String("name"),
							Service: aws.String("svc"),
							Queue:   newMockSQSQueueOrBool(),
						},
					},
				},
			},
		},
	}
	mockWorkerServiceWithSubscribeQueueNilOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Queue: SQSQueue{},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Queue: newMockSQSQueue(),
				},
			},
		},
	}
	mockWorkerServiceWithNilSubscribeQueueOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Queue: newMockSQSQueue(),
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Queue: SQSQueue{},
				},
			},
		},
	}
	mockWorkerServiceWithSubscribeQueueEmptyOverride := WorkerService{
		Workload: Workload{
			Name: aws.String("phonetool"),
			Type: aws.String(manifestinfo.WorkerServiceType),
		},
		WorkerServiceConfig: WorkerServiceConfig{
			Subscribe: SubscribeConfig{
				Queue: SQSQueue{},
			},
		},
		Environments: map[string]*WorkerServiceConfig{
			"test-sub": {
				Subscribe: SubscribeConfig{
					Queue: newMockSQSQueue(),
				},
			},
		},
	}
	testCases := map[string]struct {
		svc       *WorkerService
		inEnvName string
		wanted   *WorkerService
		original *WorkerService
	}{
		"no env override": {
			svc:       &mockWorkerServiceWithNoEnvironments,
			inEnvName: "test",
			wanted:    &mockWorkerServiceWithNoEnvironments,
			original:  &mockWorkerServiceWithNoEnvironments,
		},
		"with nil env override": {
			svc:       &mockWorkerServiceWithNilEnvironment,
			inEnvName: "test",
			wanted:    &mockWorkerServiceWithNilEnvironment,
			original:  &mockWorkerServiceWithNilEnvironment,
		},
		"uses env minimal overrides": {
			svc:       &mockWorkerServiceWithMinimalOverride,
			inEnvName: "test",
			wanted: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{},
					},
				},
			},
			original: &mockWorkerServiceWithMinimalOverride,
		},
		"uses env all overrides": {
			svc:       &mockWorkerServiceWithAllOverride,
			inEnvName: "test",
			wanted: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							DockerLabels: map[string]string{
								"com.amazonaws.ecs.copilot.description": "Overridden!",
							},
						},
					},
					TaskConfig: TaskConfig{
						CPU:    aws.Int(512),
						Memory: aws.Int(256),
						Count: Count{
							AdvancedCount: AdvancedCount{
								CPU: mockConfig,
							},
						},
						Variables: map[string]Variable{
							"LOG_LEVEL": {
								stringOrFromCFN{
									Plain: stringP(""),
								},
							},
						},
					},
					Sidecars: map[string]*SidecarConfig{
						"xray": {
							Port: aws.String("2000/udp"),
							Image: Union[*string, ImageLocationOrBuild]{
								Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
							},
							CredsParam: aws.String("some arn"),
						},
					},
					Logging: Logging{
						Destination: map[string]string{
							"Name":            "datadog",
							"include-pattern": "*",
							"exclude-pattern": "fe/",
						},
					},
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("topicName2"),
								Service: aws.String("bestService2"),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithAllOverride,
		},
		"with image build overridden by image location": {
			svc:       &mockWorkerServiceWithImageOverrideBuildByLocation,
			inEnvName: "prod-iad",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("env-override location"),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithImageOverrideBuildByLocation,
		},
		"with image location overridden by image location": {
			svc:       &mockWorkerServiceWithImageOverrideLocationByLocation,
			inEnvName: "prod-iad",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Location: aws.String("env-override location"),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithImageOverrideLocationByLocation,
		},
		"with image build overridden by image build": {
			svc:       &mockWorkerServiceWithImageOverrideBuildByBuild,
			inEnvName: "prod-iad",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildString: aws.String("env overridden dockerfile"),
								},
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithImageOverrideBuildByBuild,
		},
		"with image location overridden by image build": {
			svc:       &mockWorkerServiceWithImageOverrideLocationByBuild,
			inEnvName: "prod-iad",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{
							ImageLocationOrBuild: ImageLocationOrBuild{
								Build: BuildArgsOrString{
									BuildString: aws.String("env overridden dockerfile"),
								},
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithImageOverrideLocationByBuild,
		},
		"with nil subscribe overriden by full subscribe": {
			svc:       &mockWorkerServiceWithNilSubscribeOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithNilSubscribeOverride,
		},
		"with full subscribe and nil subscribe env": {
			svc:       &mockWorkerServiceWithSubscribeNilOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithSubscribeNilOverride,
		},
		"with full subscribe and empty subscribe env": {
			svc:       &mockWorkerServiceWithEmptySubscribeOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{},
					},
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithEmptySubscribeOverride,
		},
		"with nil subscribe topic overriden by full subscribe topic": {
			svc:       &mockWorkerServiceWithNilSubscribeTopicOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithNilSubscribeTopicOverride,
		},
		"with full subscribe topic and nil subscribe topic env": {
			svc:       &mockWorkerServiceWithSubscribeTopicNilOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithSubscribeTopicNilOverride,
		},
		"with empty subscribe topic overriden by full subscribe topic": {
			svc:       &mockWorkerServiceWithSubscribeTopicEmptyOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{},
					},
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			original: &mockWorkerServiceWithSubscribeTopicEmptyOverride,
		},
		"with nil subscribe queue overriden by full subscribe queue": {
			svc:       &mockWorkerServiceWithNilSubscribeQueueOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Queue: newMockSQSQueue(),
					},
				},
			},
			original: &mockWorkerServiceWithNilSubscribeQueueOverride,
		},
		"with full subscribe queue and nil subscribe queue env": {
			svc:       &mockWorkerServiceWithSubscribeQueueNilOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Queue: newMockSQSQueue(),
					},
				},
			},
			original: &mockWorkerServiceWithSubscribeQueueNilOverride,
		},
		"with empty subscribe queue overridden by full subscribe queue": {
			svc:       &mockWorkerServiceWithSubscribeQueueEmptyOverride,
			inEnvName: "test-sub",
			wanted: &WorkerService{
				Workload: Workload{
					Name: aws.String("phonetool"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
				WorkerServiceConfig: WorkerServiceConfig{
					ImageConfig: ImageWithHealthcheck{
						Image: Image{},
					},
					Subscribe: SubscribeConfig{
						Queue: newMockSQSQueue(),
					},
				},
			},
			original: &mockWorkerServiceWithSubscribeQueueEmptyOverride,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			got, _ := tc.svc.applyEnv(tc.inEnvName)
			// Should override properly.
			require.Equal(t, tc.wanted, got)
			// Should not impact the original manifest struct.
			require.Equal(t, tc.svc, tc.original)
		})
	}
}
// TestWorkerSvc_ApplyEnv_CountOverrides verifies how a worker service's
// "count" configuration is overridden by an environment-level "count"
// when applyEnv is invoked for that environment.
func TestWorkerSvc_ApplyEnv_CountOverrides(t *testing.T) {
	mockRange := IntRangeBand("1-10")
	perc := Percentage(70)
	mockConfig := ScalingConfigOrT[Percentage]{
		Value: &perc,
	}
	testCases := map[string]struct {
		svcCount Count // Count set at the top level of the manifest.
		envCount Count // Count override under the "test" environment.
		expected *WorkerService
	}{
		"empty env advanced count override": {
			svcCount: Count{
				AdvancedCount: AdvancedCount{
					Range: Range{Value: &mockRange},
					CPU:   mockConfig,
				},
			},
			envCount: Count{},
			// An empty env override leaves the service-level count untouched.
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{Value: &mockRange},
								CPU:   mockConfig,
							},
						},
					},
				},
			},
		},
		"with count value overriden by count value": {
			svcCount: Count{Value: aws.Int(5)},
			envCount: Count{Value: aws.Int(8)},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{Value: aws.Int(8)},
					},
				},
			},
		},
		"with count value overriden by spot count": {
			svcCount: Count{Value: aws.Int(4)},
			envCount: Count{
				AdvancedCount: AdvancedCount{
					Spot: aws.Int(6),
				},
			},
			// An advanced (spot) override replaces the simple count value entirely.
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot: aws.Int(6),
							},
						},
					},
				},
			},
		},
		"with range overriden by spot count": {
			svcCount: Count{
				AdvancedCount: AdvancedCount{
					Range: Range{Value: &mockRange},
				},
			},
			envCount: Count{
				AdvancedCount: AdvancedCount{
					Spot: aws.Int(6),
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Spot: aws.Int(6),
							},
						},
					},
				},
			},
		},
		"with range overriden by range config": {
			svcCount: Count{
				AdvancedCount: AdvancedCount{
					Range: Range{Value: &mockRange},
				},
			},
			envCount: Count{
				AdvancedCount: AdvancedCount{
					Range: Range{
						RangeConfig: RangeConfig{
							Min: aws.Int(2),
							Max: aws.Int(8),
						},
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{
							AdvancedCount: AdvancedCount{
								Range: Range{
									RangeConfig: RangeConfig{
										Min: aws.Int(2),
										Max: aws.Int(8),
									},
								},
							},
						},
					},
				},
			},
		},
		"with spot overriden by count value": {
			svcCount: Count{
				AdvancedCount: AdvancedCount{
					Spot: aws.Int(5),
				},
			},
			envCount: Count{Value: aws.Int(12)},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					TaskConfig: TaskConfig{
						Count: Count{Value: aws.Int(12)},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		// GIVEN
		// The "staging" environment carries no count override; only "test"
		// is applied below, so its override must be the only one observed.
		svc := WorkerService{
			WorkerServiceConfig: WorkerServiceConfig{
				TaskConfig: TaskConfig{
					Count: tc.svcCount,
				},
			},
			Environments: map[string]*WorkerServiceConfig{
				"test": {
					TaskConfig: TaskConfig{
						Count: tc.envCount,
					},
				},
				"staging": {
					TaskConfig: TaskConfig{},
				},
			},
		}
		t.Run(name, func(t *testing.T) {
			// WHEN
			actual, _ := svc.applyEnv("test")
			// THEN
			require.Equal(t, tc.expected, actual)
		})
	}
}
// TestDeadLetterQueue_IsEmpty verifies that DeadLetterQueue.IsEmpty reports
// true only when no dead-letter configuration has been specified.
func TestDeadLetterQueue_IsEmpty(t *testing.T) {
	for name, testCase := range map[string]struct {
		in     DeadLetterQueue
		wanted bool
	}{
		"empty dead letter queue": {
			in:     DeadLetterQueue{},
			wanted: true,
		},
		"non empty dead letter queue": {
			// wanted defaults to false: any configured field makes it non-empty.
			in: DeadLetterQueue{
				Tries: aws.Uint16(3),
			},
		},
	} {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, testCase.wanted, testCase.in.IsEmpty())
		})
	}
}
// TestSQSQueueOrBool_UnmarshalYAML verifies that the "queue" field of a topic
// subscription unmarshals from either a boolean or an advanced SQS map, and
// that an unsupported scalar type surfaces errUnmarshalQueueOpts.
func TestSQSQueueOrBool_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct SQSQueueOrBool
		wantedError  error
	}{
		"with boolean": {
			inContent: []byte(`queue: true`),
			wantedStruct: SQSQueueOrBool{
				Enabled: aws.Bool(true),
			},
		},
		"with advanced case": {
			inContent: []byte(`queue:
  retention: 5s
  delay: 1m
  timeout: 5m
  dead_letter:
    tries: 10`),
			wantedStruct: SQSQueueOrBool{
				Advanced: SQSQueue{
					Retention: durationp(5 * time.Second),
					Delay:     durationp(1 * time.Minute),
					Timeout:   durationp(5 * time.Minute),
					DeadLetter: DeadLetterQueue{
						Tries: uint16P(10),
					},
				},
			},
		},
		"invalid type": {
			// Neither a boolean nor a mapping: unmarshaling must fail.
			inContent:   []byte(`queue: 10`),
			wantedError: errUnmarshalQueueOpts,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var sc TopicSubscription
			err := yaml.Unmarshal(tc.inContent, &sc)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				// check memberwise dereferenced pointer equality
				require.Equal(t, tc.wantedStruct.Enabled, sc.Queue.Enabled)
				require.Equal(t, tc.wantedStruct.Advanced.DeadLetter, sc.Queue.Advanced.DeadLetter)
				require.Equal(t, tc.wantedStruct.Advanced.Delay, sc.Queue.Advanced.Delay)
				require.Equal(t, tc.wantedStruct.Advanced.Retention, sc.Queue.Advanced.Retention)
				require.Equal(t, tc.wantedStruct.Advanced.Timeout, sc.Queue.Advanced.Timeout)
			}
		})
	}
}
// TestWorkerService_RequiredEnvironmentFeatures verifies which environment
// features (NAT, EFS) a worker service manifest declares as required based
// on its network placement and storage configuration.
func TestWorkerService_RequiredEnvironmentFeatures(t *testing.T) {
	testCases := map[string]struct {
		mft    func(svc *WorkerService) // mutates the baseline manifest for the case.
		wanted []string
	}{
		"no feature required by default": {
			mft: func(svc *WorkerService) {},
		},
		"nat feature required": {
			// Private subnet placement needs NAT for outbound connectivity.
			mft: func(svc *WorkerService) {
				svc.Network = NetworkConfig{
					VPC: vpcConfig{
						Placement: PlacementArgOrString{
							PlacementString: placementStringP(PrivateSubnetPlacement),
						},
					},
				}
			},
			wanted: []string{template.NATFeatureName},
		},
		"efs feature required by enabling managed volume": {
			mft: func(svc *WorkerService) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-managed-volume-1": {
							EFS: EFSConfigOrBool{
								Enabled: aws.Bool(true),
							},
						},
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
			wanted: []string{template.EFSFeatureName},
		},
		"efs feature not required because storage is imported": {
			// An existing file system ID means Copilot does not manage EFS.
			mft: func(svc *WorkerService) {
				svc.Storage = Storage{
					Volumes: map[string]*Volume{
						"mock-imported-volume": {
							EFS: EFSConfigOrBool{
								Advanced: EFSVolumeConfiguration{
									FileSystemID: aws.String("mock-id"),
								},
							},
						},
					},
				}
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			inSvc := WorkerService{
				Workload: Workload{
					Name: aws.String("mock-svc"),
					Type: aws.String(manifestinfo.WorkerServiceType),
				},
			}
			tc.mft(&inSvc)
			got := inSvc.requiredEnvironmentFeatures()
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestWorkerService_Subscriptions verifies how Subscriptions() normalizes the
// topic subscriptions of a worker service: topic-specific queues are kept,
// and topics routed to a FIFO queue get the ".fifo" suffix appended to their
// names.
func TestWorkerService_Subscriptions(t *testing.T) {
	duration111Seconds := 111 * time.Second
	testCases := map[string]struct {
		input    *WorkerService
		expected *WorkerService
	}{
		"empty subscription": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{},
				},
			},
		},
		"valid subscribe with one topic specific standard queue and a default standard queue": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
						},
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue: SQSQueueOrBool{
									Advanced: SQSQueue{
										Retention:  &duration111Seconds,
										Delay:      &duration111Seconds,
										Timeout:    &duration111Seconds,
										DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
									},
								},
							},
						},
						Queue: SQSQueue{},
					},
				},
			},
		},
		"valid subscribe with one topic specific fifo queue and a default standard queue": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSFIFOQueueOrBool(),
							},
						},
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								// ".fifo" suffix is appended because the topic's queue is FIFO.
								Name:    aws.String("name.fifo"),
								Service: aws.String("svc"),
								Queue:   newMockSQSFIFOQueueOrBool(),
							},
						},
						Queue: SQSQueue{},
					},
				},
			},
		},
		"valid subscribe with no topic specific standard queue but with default standard queue with empty config": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
						},
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
						},
						Queue: SQSQueue{},
					},
				},
			},
		},
		"valid subscribe with no topic specific standard queue but with default standard queue with minimal config": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
						},
						Queue: newMockSQSQueue(),
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
						},
						Queue: SQSQueue{
							Retention:  &duration111Seconds,
							Delay:      &duration111Seconds,
							Timeout:    &duration111Seconds,
							DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
						},
					},
				},
			},
		},
		"valid subscribe with no topic specific fifo queue but with default fifo queue with minimal config": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
						},
						Queue: newMockSQSFIFOQueue(),
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								// Default queue is FIFO, so the topic name gets the ".fifo" suffix.
								Name:    aws.String("name.fifo"),
								Service: aws.String("svc"),
							},
						},
						Queue: newMockSQSFIFOQueue(),
					},
				},
			},
		},
		"valid subscribe with one topic specific standard queue and another subscription for the default fifo queue": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue:   newMockSQSQueueOrBool(),
							},
							{
								Name:    aws.String("name2"),
								Service: aws.String("svc"),
							},
						},
						Queue: newMockSQSFIFOQueue(),
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
								Queue: SQSQueueOrBool{
									Advanced: SQSQueue{
										Retention:  &duration111Seconds,
										Delay:      &duration111Seconds,
										Timeout:    &duration111Seconds,
										DeadLetter: DeadLetterQueue{Tries: aws.Uint16(10)},
									},
								},
							},
							{
								Name:    aws.String("name2.fifo"),
								Service: aws.String("svc"),
							},
						},
						Queue: newMockSQSFIFOQueue(),
					},
				},
			},
		},
		"valid subscribe with one topic specific fifo queue and another subscription for the default standard queue": {
			input: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
							{
								Name:    aws.String("name2"),
								Service: aws.String("svc"),
								Queue:   newMockSQSFIFOQueueOrBool(),
							},
						},
					},
				},
			},
			expected: &WorkerService{
				WorkerServiceConfig: WorkerServiceConfig{
					Subscribe: SubscribeConfig{
						Topics: []TopicSubscription{
							{
								Name:    aws.String("name"),
								Service: aws.String("svc"),
							},
							{
								Name:    aws.String("name2.fifo"),
								Service: aws.String("svc"),
								Queue:   newMockSQSFIFOQueueOrBool(),
							},
						},
						Queue: SQSQueue{},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		svc := tc.input
		t.Run(name, func(t *testing.T) {
			// WHEN
			svc.Subscribe.Topics = svc.Subscriptions()
			// THEN
			require.Equal(t, tc.expected, svc)
		})
	}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/google/shlex"
"github.com/aws/aws-sdk-go/aws"
"gopkg.in/yaml.v3"
)
const (
	// defaultDockerfileName is the filename assumed when a manifest's build
	// configuration does not explicitly name a Dockerfile.
	defaultDockerfileName = "Dockerfile"
)
// SQS Queue field options.
const (
	sqsFIFOThroughputLimitPerMessageGroupID = "perMessageGroupId"
	sqsFIFOThroughputLimitPerQueue          = "perQueue"
	sqsDeduplicationScopeMessageGroup       = "messageGroup"
	sqsDeduplicationScopeQueue              = "queue"
)
// AWS VPC subnet placement options.
const (
	PublicSubnetPlacement  = PlacementString("public")
	PrivateSubnetPlacement = PlacementString("private")
)
// All placement options.
var (
	// subnetPlacements lists every valid value for the "placement" string form.
	subnetPlacements = []string{string(PublicSubnetPlacement), string(PrivateSubnetPlacement)}
)
// Error definitions.
// The errUnmarshal* sentinels are returned by the custom UnmarshalYAML
// implementations below when a field matches none of its accepted shapes.
var (
	ErrAppRunnerInvalidPlatformWindows = errors.New("Windows is not supported for App Runner services")

	errUnmarshalBuildOpts          = errors.New("unable to unmarshal build field into string or compose-style map")
	errUnmarshalPlatformOpts       = errors.New("unable to unmarshal platform field into string or compose-style map")
	errUnmarshalSecurityGroupOpts  = errors.New(`unable to unmarshal "security_groups" field into slice of strings or compose-style map`)
	errUnmarshalPlacementOpts      = errors.New("unable to unmarshal placement field into string or compose-style map")
	errUnmarshalServiceConnectOpts = errors.New(`unable to unmarshal "connect" field into boolean or compose-style map`)
	errUnmarshalSubnetsOpts        = errors.New("unable to unmarshal subnets field into string slice or compose-style map")
	errUnmarshalCountOpts          = errors.New(`unable to unmarshal "count" field to an integer or autoscaling configuration`)
	errUnmarshalRangeOpts          = errors.New(`unable to unmarshal "range" field`)

	errUnmarshalExec       = errors.New(`unable to unmarshal "exec" field into boolean or exec configuration`)
	errUnmarshalEntryPoint = errors.New(`unable to unmarshal "entrypoint" into string or slice of strings`)
	errUnmarshalAlias      = errors.New(`unable to unmarshal "alias" into advanced alias map, string, or slice of strings`)
	errUnmarshalCommand    = errors.New(`unable to unmarshal "command" into string or slice of strings`)
)
// DynamicWorkload represents a dynamically populated workload.
type DynamicWorkload interface {
	// ApplyEnv returns the manifest with environment-specific overrides applied.
	ApplyEnv(envName string) (DynamicWorkload, error)
	// Validate checks that the manifest's fields are consistent and well-formed.
	Validate() error
	// RequiredEnvironmentFeatures lists the environment features (e.g. NAT, EFS)
	// the workload depends on.
	RequiredEnvironmentFeatures() []string
	// Load populates dynamic fields using the given AWS session
	// (e.g. resolving subnet IDs from tags).
	Load(sess *session.Session) error
	// Manifest returns the underlying manifest struct.
	Manifest() any
}

// workloadManifest is the unexported contract every concrete manifest type
// (load balanced web service, worker service, scheduled job, ...) implements.
type workloadManifest interface {
	validate() error
	applyEnv(envName string) (workloadManifest, error)
	requiredEnvironmentFeatures() []string
	subnets() *SubnetListOrArgs
}
// UnmarshalWorkload deserializes the YAML input stream into a workload manifest object.
// It returns an error if deserialization fails, and an *ErrInvalidWorkloadType
// if the "type" field does not name a supported workload type.
func UnmarshalWorkload(in []byte) (DynamicWorkload, error) {
	base := Workload{}
	if err := yaml.Unmarshal(in, &base); err != nil {
		return nil, fmt.Errorf("unmarshal to workload manifest: %w", err)
	}
	typeVal := aws.StringValue(base.Type)
	// Map each supported workload type to its default-manifest constructor.
	constructors := map[string]func() workloadManifest{
		manifestinfo.LoadBalancedWebServiceType:  func() workloadManifest { return newDefaultLoadBalancedWebService() },
		manifestinfo.RequestDrivenWebServiceType: func() workloadManifest { return newDefaultRequestDrivenWebService() },
		manifestinfo.BackendServiceType:          func() workloadManifest { return newDefaultBackendService() },
		manifestinfo.WorkerServiceType:           func() workloadManifest { return newDefaultWorkerService() },
		manifestinfo.StaticSiteType:              func() workloadManifest { return newDefaultStaticSite() },
		manifestinfo.ScheduledJobType:            func() workloadManifest { return newDefaultScheduledJob() },
	}
	newManifest, ok := constructors[typeVal]
	if !ok {
		return nil, &ErrInvalidWorkloadType{Type: typeVal}
	}
	// Unmarshal a second time into the concrete manifest so type-specific
	// fields are populated on top of the defaults.
	m := newManifest()
	if err := yaml.Unmarshal(in, m); err != nil {
		return nil, fmt.Errorf("unmarshal manifest for %s: %w", typeVal, err)
	}
	return newDynamicWorkloadManifest(m), nil
}
// WorkloadProps contains properties for creating a new workload manifest.
type WorkloadProps struct {
	Name       string // Name of the workload.
	Dockerfile string // Path to the Dockerfile to build from.
	Image      string // Existing container image location, used instead of building.

	PrivateOnlyEnvironments []string // Environments with only private subnets.
}

// Workload holds the basic data that every workload manifest file needs to have.
type Workload struct {
	Name *string `yaml:"name"`
	Type *string `yaml:"type"` // must be one of the supported manifest types.
}

// Image represents the workload's container image.
type Image struct {
	ImageLocationOrBuild `yaml:",inline"`
	Credentials          *string           `yaml:"credentials"`     // ARN of the secret containing the private repository credentials.
	DockerLabels         map[string]string `yaml:"labels,flow"`     // Apply Docker labels to the container at runtime.
	DependsOn            DependsOn         `yaml:"depends_on,flow"` // Add any sidecar dependencies.
}

// ImageLocationOrBuild represents the docker build arguments and location of the existing image.
// "build" and "location" are mutually exclusive; see Image.UnmarshalYAML.
type ImageLocationOrBuild struct {
	Build    BuildArgsOrString `yaml:"build"`    // Build an image from a Dockerfile.
	Location *string           `yaml:"location"` // Use an existing image instead.
}

// DependsOn represents container dependency for a container.
// Keys are container names; values are the dependency conditions.
type DependsOn map[string]string
// UnmarshalYAML overrides the default YAML unmarshaling logic for the Image
// struct, allowing it to perform more complex unmarshaling behavior.
// It rejects manifests that set both "build" and "location", since the two
// fields are mutually exclusive.
// This method implements the yaml.Unmarshaler (v3) interface.
func (i *Image) UnmarshalYAML(value *yaml.Node) error {
	// Decode through a type alias to avoid infinite recursion into this method.
	type image Image
	if err := value.Decode((*image)(i)); err != nil {
		return err
	}
	if !i.Build.isEmpty() && i.Location != nil {
		return &errFieldMutualExclusive{
			firstField:  "build",
			secondField: "location",
			mustExist:   true,
		}
	}
	return nil
}

// GetLocation returns the location of the image, or "" if none is set.
func (i Image) GetLocation() string {
	return aws.StringValue(i.Location)
}
// BuildConfig populates a docker.BuildArguments struct from the fields available in the manifest.
// Prefer the following hierarchy:
// 1. Specific dockerfile, specific context
// 2. Specific dockerfile, context = dockerfile dir
// 3. "Dockerfile" located in context dir
// 4. "Dockerfile" located in ws root.
func (i *ImageLocationOrBuild) BuildConfig(rootDirectory string) *DockerBuildArgs {
	df, ctx := i.dockerfile(), i.context()
	var dockerfilePath, contextPath string
	switch {
	case df != "" && ctx != "":
		// Both are explicit.
		dockerfilePath = filepath.Join(rootDirectory, df)
		contextPath = filepath.Join(rootDirectory, ctx)
	case df != "":
		// Only the Dockerfile is given; its directory becomes the context.
		dockerfilePath = filepath.Join(rootDirectory, df)
		contextPath = filepath.Join(rootDirectory, filepath.Dir(df))
	case ctx != "":
		// Only the context is given; assume "Dockerfile" inside it.
		dockerfilePath = filepath.Join(rootDirectory, ctx, defaultDockerfileName)
		contextPath = filepath.Join(rootDirectory, ctx)
	default:
		// Neither is given; fall back to "Dockerfile" at the workspace root.
		dockerfilePath = filepath.Join(rootDirectory, defaultDockerfileName)
		contextPath = rootDirectory
	}
	return &DockerBuildArgs{
		Dockerfile: aws.String(dockerfilePath),
		Context:    aws.String(contextPath),
		Args:       i.args(),
		Target:     i.target(),
		CacheFrom:  i.cacheFrom(),
	}
}
// dockerfile returns the path to the workload's Dockerfile, or "" when none
// is configured. The structured "build.dockerfile" field takes precedence
// over the shorthand build string.
func (i *ImageLocationOrBuild) dockerfile() string {
	if df := i.Build.BuildArgs.Dockerfile; df != nil {
		return aws.StringValue(df)
	}
	return aws.StringValue(i.Build.BuildString)
}
// context returns the build context directory if it exists, otherwise an empty string.
func (i *ImageLocationOrBuild) context() string {
	return aws.StringValue(i.Build.BuildArgs.Context)
}

// args returns the args section, if it exists, to override args in the dockerfile.
// Otherwise it returns an empty map.
func (i *ImageLocationOrBuild) args() map[string]string {
	return i.Build.BuildArgs.Args
}

// target returns the build target stage if it exists, otherwise nil.
func (i *ImageLocationOrBuild) target() *string {
	return i.Build.BuildArgs.Target
}

// cacheFrom returns the cache from build section, if it exists.
// Otherwise it returns nil.
func (i *ImageLocationOrBuild) cacheFrom() []string {
	return i.Build.BuildArgs.CacheFrom
}
// ImageOverride holds fields that override Dockerfile image defaults.
type ImageOverride struct {
	EntryPoint EntryPointOverride `yaml:"entrypoint"` // Overrides the image's ENTRYPOINT.
	Command    CommandOverride    `yaml:"command"`    // Overrides the image's CMD.
}

// stringSliceOrShellString is either a slice of string or a string using shell-style rules.
type stringSliceOrShellString StringSliceOrString

// EntryPointOverride is a custom type which supports unmarshalling "entrypoint" yaml which
// can either be of type string or type slice of string.
type EntryPointOverride stringSliceOrShellString

// CommandOverride is a custom type which supports unmarshalling "command" yaml which
// can either be of type string or type slice of string.
type CommandOverride stringSliceOrShellString
// UnmarshalYAML overrides the default YAML unmarshalling logic for the EntryPointOverride
// struct, allowing it to be unmarshalled into a string slice or a string.
// This method implements the yaml.Unmarshaler (v3) interface.
func (e *EntryPointOverride) UnmarshalYAML(value *yaml.Node) error {
	// Map any decoding failure to the field-specific sentinel error.
	if (*StringSliceOrString)(e).UnmarshalYAML(value) != nil {
		return errUnmarshalEntryPoint
	}
	return nil
}

// ToStringSlice converts an EntryPointOverride to a slice of string using shell-style rules.
func (e *EntryPointOverride) ToStringSlice() ([]string, error) {
	return (*stringSliceOrShellString)(e).toStringSlice()
}

// UnmarshalYAML overrides the default YAML unmarshaling logic for the CommandOverride
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (c *CommandOverride) UnmarshalYAML(value *yaml.Node) error {
	// Map any decoding failure to the field-specific sentinel error.
	if (*StringSliceOrString)(c).UnmarshalYAML(value) != nil {
		return errUnmarshalCommand
	}
	return nil
}

// ToStringSlice converts a CommandOverride to a slice of string using shell-style rules.
func (c *CommandOverride) ToStringSlice() ([]string, error) {
	return (*stringSliceOrShellString)(c).toStringSlice()
}
// StringSliceOrString is a custom type that can either be of type string or type slice of string.
type StringSliceOrString struct {
	String      *string  // Set when the YAML value is a scalar string.
	StringSlice []string // Set when the YAML value is a sequence.
}

// UnmarshalYAML overrides the default YAML unmarshaling logic for the StringSliceOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (s *StringSliceOrString) UnmarshalYAML(value *yaml.Node) error {
	// Try the slice form first; a yaml.TypeError just means the node is not
	// a sequence, so we fall through to the string form.
	if err := value.Decode(&s.StringSlice); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}

	if s.StringSlice != nil {
		// Unmarshaled successfully to s.StringSlice, unset s.String, and return.
		s.String = nil
		return nil
	}

	return value.Decode(&s.String)
}

// isEmpty returns true when neither the string nor the slice form is set.
func (s *StringSliceOrString) isEmpty() bool {
	return s.String == nil && len(s.StringSlice) == 0
}
// ToStringSlice converts a StringSliceOrString to a slice of string.
// Returns nil when neither form is set.
func (s *StringSliceOrString) ToStringSlice() []string {
	switch {
	case s.StringSlice != nil:
		return s.StringSlice
	case s.String != nil:
		return []string{*s.String}
	default:
		return nil
	}
}

// toStringSlice tokenizes the string form using shell-style quoting rules;
// the slice form is returned as-is.
func (s *stringSliceOrShellString) toStringSlice() ([]string, error) {
	if s.StringSlice != nil {
		return s.StringSlice, nil
	}
	if s.String == nil {
		return nil, nil
	}
	tokens, err := shlex.Split(*s.String)
	if err != nil {
		return nil, fmt.Errorf("convert string into tokens using shell-style rules: %w", err)
	}
	return tokens, nil
}
// BuildArgsOrString is a custom type which supports unmarshaling yaml which
// can either be of type string or type DockerBuildArgs.
type BuildArgsOrString struct {
	BuildString *string         // Shorthand form: a single string (Dockerfile path).
	BuildArgs   DockerBuildArgs // Advanced form: full docker build configuration.
}

// isEmpty returns true when neither the shorthand build string nor any
// advanced build argument is set.
func (b *BuildArgsOrString) isEmpty() bool {
	// Return the condition directly instead of an if/else that returns
	// true/false (staticcheck S1008).
	return aws.StringValue(b.BuildString) == "" && b.BuildArgs.isEmpty()
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the BuildArgsOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (b *BuildArgsOrString) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first; a yaml.TypeError just means the node
	// is not a mapping, so we fall through to the string form.
	if err := value.Decode(&b.BuildArgs); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}

	if !b.BuildArgs.isEmpty() {
		// Unmarshaled successfully to b.BuildArgs, unset b.BuildString, and return.
		b.BuildString = nil
		return nil
	}

	if err := value.Decode(&b.BuildString); err != nil {
		return errUnmarshalBuildOpts
	}
	return nil
}
// DockerBuildArgs represents the options specifiable under the "build" field
// of Docker Compose services. For more information, see:
// https://docs.docker.com/compose/compose-file/#build
type DockerBuildArgs struct {
	Context    *string           `yaml:"context,omitempty"`    // Docker build context directory.
	Dockerfile *string           `yaml:"dockerfile,omitempty"` // Path to the Dockerfile.
	Args       map[string]string `yaml:"args,omitempty"`       // Build-time arguments.
	Target     *string           `yaml:"target,omitempty"`     // Target build stage.
	CacheFrom  []string          `yaml:"cache_from,omitempty"` // Images to use as cache sources.
}

// isEmpty returns true when no build option has been specified.
func (b *DockerBuildArgs) isEmpty() bool {
	// Return the boolean expression directly instead of an if/else that
	// returns true/false (staticcheck S1008).
	return b.Context == nil && b.Dockerfile == nil && b.Args == nil && b.Target == nil && b.CacheFrom == nil
}
// PublishConfig represents the configurable options for setting up publishers.
type PublishConfig struct {
	Topics []Topic `yaml:"topics"` // SNS topics this workload publishes to.
}

// Topic represents the configurable options for setting up a SNS Topic.
type Topic struct {
	Name *string                      `yaml:"name"`
	FIFO FIFOTopicAdvanceConfigOrBool `yaml:"fifo"`
}

// FIFOTopicAdvanceConfigOrBool represents the configurable options for fifo topics.
// It can be unmarshaled from either a boolean or the advanced config map.
type FIFOTopicAdvanceConfigOrBool struct {
	Enable   *bool
	Advanced FIFOTopicAdvanceConfig
}

// IsEmpty returns true if the FIFOTopicAdvanceConfigOrBool struct has all nil values.
func (f *FIFOTopicAdvanceConfigOrBool) IsEmpty() bool {
	return f.Enable == nil && f.Advanced.IsEmpty()
}

// IsEnabled returns true if FIFO is enabled on the SNS topic, either via the
// boolean shorthand or by specifying any advanced FIFO configuration.
func (f *FIFOTopicAdvanceConfigOrBool) IsEnabled() bool {
	return aws.BoolValue(f.Enable) || !f.Advanced.IsEmpty()
}

// FIFOTopicAdvanceConfig represents the advanced fifo topic config.
type FIFOTopicAdvanceConfig struct {
	ContentBasedDeduplication *bool `yaml:"content_based_deduplication"`
}

// IsEmpty returns true if the FIFOTopicAdvanceConfig struct has all nil values.
func (a *FIFOTopicAdvanceConfig) IsEmpty() bool {
	return a.ContentBasedDeduplication == nil
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the FIFOTopicAdvanceConfigOrBool
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (t *FIFOTopicAdvanceConfigOrBool) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first; a yaml.TypeError just means the node
	// is not a mapping, so we fall through to the boolean form.
	if err := value.Decode(&t.Advanced); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !t.Advanced.IsEmpty() {
		return nil
	}
	if err := value.Decode(&t.Enable); err != nil {
		return errUnmarshalFifoConfig
	}
	return nil
}
// NetworkConfig represents options for network connection to AWS resources within a VPC.
type NetworkConfig struct {
	VPC     vpcConfig                `yaml:"vpc"`
	Connect ServiceConnectBoolOrArgs `yaml:"connect"` // ECS Service Connect settings.
}

// IsEmpty returns empty if the struct has all zero members.
// NOTE(review): only the VPC field is checked; Connect is ignored here —
// confirm whether that is intentional.
func (c *NetworkConfig) IsEmpty() bool {
	return c.VPC.isEmpty()
}

// requiredEnvFeatures returns the NAT feature when tasks are placed in
// private subnets; otherwise nil.
func (c *NetworkConfig) requiredEnvFeatures() []string {
	if aws.StringValue((*string)(c.VPC.Placement.PlacementString)) == string(PrivateSubnetPlacement) {
		return []string{template.NATFeatureName}
	}
	return nil
}
// ServiceConnectBoolOrArgs represents ECS Service Connect configuration.
// It can be unmarshaled from either a boolean or the advanced config map.
type ServiceConnectBoolOrArgs struct {
	EnableServiceConnect *bool
	ServiceConnectArgs
}

// Enabled returns if ServiceConnect is enabled or not, either via the boolean
// shorthand or by specifying any advanced configuration.
func (s *ServiceConnectBoolOrArgs) Enabled() bool {
	return aws.BoolValue(s.EnableServiceConnect) || !s.ServiceConnectArgs.isEmpty()
}

// UnmarshalYAML overrides the default YAML unmarshaling logic for the ServiceConnect
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (s *ServiceConnectBoolOrArgs) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first; a yaml.TypeError just means the node
	// is not a mapping, so we fall through to the boolean form.
	if err := value.Decode(&s.ServiceConnectArgs); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !s.ServiceConnectArgs.isEmpty() {
		s.EnableServiceConnect = nil
		return nil
	}

	if err := value.Decode(&s.EnableServiceConnect); err != nil {
		return errUnmarshalServiceConnectOpts
	}
	return nil
}

// ServiceConnectArgs includes the advanced configuration for ECS Service Connect.
type ServiceConnectArgs struct {
	Alias *string // Custom DNS name for the service endpoint.
}

// isEmpty returns true when no advanced Service Connect option is set.
func (s *ServiceConnectArgs) isEmpty() bool {
	return s.Alias == nil
}
// PlacementArgOrString represents where to place tasks.
// It can be unmarshaled from either a string ("public"/"private") or the
// advanced subnet configuration.
type PlacementArgOrString struct {
	*PlacementString
	PlacementArgs
}

// PlacementString represents what types of subnets (public or private subnets) to place tasks.
type PlacementString string

// IsEmpty returns empty if the struct has all zero members.
func (p *PlacementArgOrString) IsEmpty() bool {
	return p.PlacementString == nil && p.PlacementArgs.isEmpty()
}

// UnmarshalYAML overrides the default YAML unmarshaling logic for the PlacementArgOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (p *PlacementArgOrString) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first; a yaml.TypeError just means the node
	// is not a mapping, so we fall through to the string form.
	if err := value.Decode(&p.PlacementArgs); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !p.PlacementArgs.isEmpty() {
		// Unmarshaled successfully to p.PlacementArgs, unset p.PlacementString, and return.
		p.PlacementString = nil
		return nil
	}
	if err := value.Decode(&p.PlacementString); err != nil {
		return errUnmarshalPlacementOpts
	}
	return nil
}

// PlacementArgs represents where to place tasks.
type PlacementArgs struct {
	Subnets SubnetListOrArgs `yaml:"subnets"` // Specific subnets to place tasks in.
}

// isEmpty returns true when no subnet configuration is set.
func (p *PlacementArgs) isEmpty() bool {
	return p.Subnets.isEmpty()
}
// SubnetListOrArgs represents what subnets to place tasks. It supports unmarshalling
// yaml which can either be of type SubnetArgs or a list of strings.
type SubnetListOrArgs struct {
	IDs []string // Explicit subnet IDs.
	SubnetArgs
}

// isEmpty returns true when neither explicit IDs nor tag filters are set.
func (s *SubnetListOrArgs) isEmpty() bool {
	return len(s.IDs) == 0 && s.SubnetArgs.isEmpty()
}

// dynamicSubnets pairs a subnet configuration with an EC2 client so that
// tag-based subnet filters can be resolved into concrete subnet IDs.
type dynamicSubnets struct {
	cfg    *SubnetListOrArgs
	client subnetIDsGetter
}

// load populates the subnet's IDs field if the client is using tags.
// It is a no-op when the config is empty or IDs are already set explicitly.
func (dyn *dynamicSubnets) load() error {
	if dyn.cfg == nil || dyn.cfg.isEmpty() {
		return nil
	}
	if len(dyn.cfg.IDs) > 0 {
		return nil
	}
	// Build one EC2 filter per tag key; multi-valued tags become multi-valued filters.
	var filters []ec2.Filter
	for k, v := range dyn.cfg.FromTags {
		values := v.StringSlice
		if v.String != nil {
			values = v.ToStringSlice()
		}
		filters = append(filters, ec2.FilterForTags(k, values...))
	}
	ids, err := dyn.client.SubnetIDs(filters...)
	if err != nil {
		return fmt.Errorf("get subnet IDs: %w", err)
	}
	dyn.cfg.IDs = ids
	return nil
}

// Tags represents the aws tags which take string as key and slice of string as values.
type Tags map[string]StringSliceOrString

// SubnetArgs represents what subnets to place tasks.
type SubnetArgs struct {
	FromTags Tags `yaml:"from_tags"` // Select subnets by their AWS tags.
}

// isEmpty returns true when no tag filter is configured.
func (s *SubnetArgs) isEmpty() bool {
	return len(s.FromTags) == 0
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the SubnetListOrArgs
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (s *SubnetListOrArgs) UnmarshalYAML(value *yaml.Node) error {
	// Try the advanced map form first; a yaml.TypeError just means the node
	// is not a mapping, so we fall through to the string-list form.
	if err := value.Decode(&s.SubnetArgs); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !s.SubnetArgs.isEmpty() {
		// Unmarshaled successfully to s.SubnetArgs, unset s.IDs, and return.
		s.IDs = nil
		return nil
	}
	if err := value.Decode(&s.IDs); err != nil {
		return errUnmarshalSubnetsOpts
	}
	return nil
}
// SecurityGroupsIDsOrConfig represents security groups attached to task. It supports unmarshalling
// yaml which can either be of type SecurityGroupsConfig or a list of strings.
type SecurityGroupsIDsOrConfig struct {
	IDs            []stringOrFromCFN // Shorthand form: a plain list of security group IDs.
	AdvancedConfig SecurityGroupsConfig
}

// isEmpty returns true when neither the list form nor the advanced form is set.
func (s *SecurityGroupsIDsOrConfig) isEmpty() bool {
	return len(s.IDs) == 0 && s.AdvancedConfig.isEmpty()
}

// SecurityGroupsConfig represents which security groups are attached to a task
// and if default security group is applied.
type SecurityGroupsConfig struct {
	SecurityGroups []stringOrFromCFN `yaml:"groups"`
	DenyDefault    *bool             `yaml:"deny_default"` // If true, the environment's default security group is not attached.
}

// isEmpty returns true when no advanced security group option is set.
func (s *SecurityGroupsConfig) isEmpty() bool {
	return len(s.SecurityGroups) == 0 && s.DenyDefault == nil
}
// UnmarshalYAML overrides the default YAML unmarshalling logic for the SecurityGroupsIDsOrConfig
// struct, allowing it to be unmarshalled into a string slice or a string.
// This method implements the yaml.Unmarshaler (v3) interface.
func (s *SecurityGroupsIDsOrConfig) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&s.AdvancedConfig); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			// A type mismatch just means the value may be the list form;
			// anything else is a real error.
			return err
		}
	}
	if !s.AdvancedConfig.isEmpty() {
		// Unmarshalled successfully to s.AdvancedConfig, unset s.IDs, and return.
		s.IDs = nil
		return nil
	}
	if err := value.Decode(&s.IDs); err != nil {
		return errUnmarshalSecurityGroupOpts
	}
	return nil
}
// GetIDs returns the security groups attached to the task, taken from the
// advanced configuration when present, otherwise from the plain ID list.
// nil is returned if no security groups are specified.
func (s *SecurityGroupsIDsOrConfig) GetIDs() []stringOrFromCFN {
	if s.AdvancedConfig.isEmpty() {
		return s.IDs
	}
	return s.AdvancedConfig.SecurityGroups
}
// IsDefaultSecurityGroupDenied returns true if DenyDefault is set to true
// in SecurityGroupsIDsOrConfig.AdvancedConfig. Otherwise, false is returned.
func (s *SecurityGroupsIDsOrConfig) IsDefaultSecurityGroupDenied() bool {
	if s.AdvancedConfig.isEmpty() {
		// The plain ID-list form cannot deny the default security group.
		return false
	}
	return aws.BoolValue(s.AdvancedConfig.DenyDefault)
}
// vpcConfig represents the security groups and subnets attached to a task.
type vpcConfig struct {
	Placement      PlacementArgOrString      `yaml:"placement"`
	SecurityGroups SecurityGroupsIDsOrConfig `yaml:"security_groups"`
}

// isEmpty reports whether neither placement nor security groups are configured.
func (v *vpcConfig) isEmpty() bool {
	return v.Placement.IsEmpty() && v.SecurityGroups.isEmpty()
}

// PlatformArgsOrString is a custom type which supports unmarshaling yaml which
// can either be of type string or type PlatformArgs.
type PlatformArgsOrString struct {
	*PlatformString              // Short form, e.g. "linux/amd64".
	PlatformArgs    PlatformArgs // Advanced form with explicit osfamily/architecture.
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the PlatformArgsOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (p *PlatformArgsOrString) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&p.PlatformArgs); err != nil {
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			// A type mismatch just means the value may be the short string form.
			return err
		}
	}
	if !p.PlatformArgs.isEmpty() {
		// Unmarshaled successfully to p.PlatformArgs, unset p.PlatformString, and return.
		p.PlatformString = nil
		return nil
	}
	if err := value.Decode(&p.PlatformString); err != nil {
		return errUnmarshalPlatformOpts
	}
	return nil
}
// OS returns the operating system family.
// For the short "os/arch" string form it is the part before the first "/";
// otherwise it is the advanced form's OS family. The result is lowercased.
func (p *PlatformArgsOrString) OS() string {
	// NOTE: the local string p shadows the receiver inside this branch.
	if p := aws.StringValue((*string)(p.PlatformString)); p != "" {
		args := strings.Split(p, "/")
		return strings.ToLower(args[0])
	}
	return strings.ToLower(aws.StringValue(p.PlatformArgs.OSFamily))
}
// Arch returns the architecture of PlatformArgsOrString.
// For the short "os/arch" string form it is the part after the first "/";
// otherwise it is the advanced form's architecture. The result is lowercased.
// A malformed short string with no "/" yields the empty string instead of a panic.
func (p *PlatformArgsOrString) Arch() string {
	if ps := aws.StringValue((*string)(p.PlatformString)); ps != "" {
		args := strings.Split(ps, "/")
		if len(args) < 2 {
			// Defensive: a short platform string should always be "os/arch";
			// avoid an index-out-of-range panic on malformed input.
			return ""
		}
		return strings.ToLower(args[1])
	}
	return strings.ToLower(aws.StringValue(p.PlatformArgs.Arch))
}
// PlatformArgs represents the specifics of a target OS.
type PlatformArgs struct {
	OSFamily *string `yaml:"osfamily,omitempty"`
	Arch     *string `yaml:"architecture,omitempty"`
}

// PlatformString represents the string format of Platform.
type PlatformString string

// String implements the fmt.Stringer interface.
func (p *PlatformArgs) String() string {
	return fmt.Sprintf("('%s', '%s')", aws.StringValue(p.OSFamily), aws.StringValue(p.Arch))
}

// IsEmpty returns if the platform field is empty.
func (p *PlatformArgsOrString) IsEmpty() bool {
	return p.PlatformString == nil && p.PlatformArgs.isEmpty()
}

// isEmpty reports whether neither the OS family nor the architecture is set.
func (p *PlatformArgs) isEmpty() bool {
	return p.OSFamily == nil && p.Arch == nil
}

// bothSpecified reports whether both the OS family and the architecture are set.
func (p *PlatformArgs) bothSpecified() bool {
	return (p.OSFamily != nil) && (p.Arch != nil)
}
// platformString returns a string of the format <os>/<arch>.
func platformString(os, arch string) string {
	return fmt.Sprintf("%s/%s", os, arch)
}
// RedirectPlatform returns a platform that's supported for the given manifest type.
// An empty string is returned for the default platform, meaning no redirect is needed.
func RedirectPlatform(os, arch, wlType string) (platform string, err error) {
	// Return the empty string if passed the default platform.
	if platformString(os, arch) == defaultPlatform {
		return "", nil
	}
	// Return an error if a platform cannot be redirected.
	if wlType == manifestinfo.RequestDrivenWebServiceType && os == OSWindows {
		return "", ErrAppRunnerInvalidPlatformWindows
	}
	// All architectures default to 'x86_64' (though 'arm64' is now also supported); leave OS as is.
	// If a string is returned, the platform is not the default platform but is supported (except for more obscure platforms).
	return platformString(os, dockerengine.ArchX86), nil
}
// isWindowsPlatform reports whether the given platform targets one of the
// supported Windows OS families.
func isWindowsPlatform(platform PlatformArgsOrString) bool {
	os := platform.OS()
	for _, family := range windowsOSFamilies {
		if family == os {
			return true
		}
	}
	return false
}
// IsArmArch returns whether or not the arch is ARM.
// The comparison is case-insensitive; both "arm" and "arm64" count as ARM.
func IsArmArch(arch string) bool {
	// Lowercase once instead of twice per call.
	lower := strings.ToLower(arch)
	return lower == ArchARM || lower == ArchARM64
}
// requiresBuild reports whether the image must be built from a Dockerfile.
// Exactly one of "image.build" and "image.location" must be specified;
// any other combination is an error.
func requiresBuild(image Image) (bool, error) {
	hasBuild := !image.Build.isEmpty()
	hasLocation := image.Location != nil
	if hasBuild == hasLocation {
		// Either both or neither were provided.
		return false, fmt.Errorf(`either "image.build" or "image.location" needs to be specified in the manifest`)
	}
	return hasBuild, nil
}
// stringP converts s to a *string, mapping the empty string to nil.
func stringP(s string) *string {
	if s != "" {
		return &s
	}
	return nil
}
// uint16P converts n to a *uint16, mapping 0 to nil.
func uint16P(n uint16) *uint16 {
	if n != 0 {
		return &n
	}
	return nil
}
// placementStringP converts p to a *PlacementString, mapping the empty string to nil.
func placementStringP(p PlacementString) *PlacementString {
	if len(p) == 0 {
		return nil
	}
	// p is already a copy of the caller's value, so its address is safe to return.
	return &p
}
// publishedTopics returns the topics configured for publishing, appending the
// ".fifo" suffix to the name of every FIFO topic.
// nil is returned when no topics are configured.
func (cfg PublishConfig) publishedTopics() []Topic {
	if len(cfg.Topics) == 0 {
		return nil
	}
	pubs := make([]Topic, 0, len(cfg.Topics))
	for _, topic := range cfg.Topics {
		// topic is a copy, so mutating Name here does not touch cfg.Topics.
		if topic.FIFO.IsEnabled() {
			topic.Name = aws.String(aws.StringValue(topic.Name) + ".fifo")
		}
		pubs = append(pubs, topic)
	}
	return pubs
}
| 867 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"gopkg.in/yaml.v3"
)
// Defaults for Firelens configuration.
const (
	// FirelensContainerName is the container name used for the Firelens log router.
	FirelensContainerName = "firelens_log_router"
	defaultFluentbitImage = "public.ecr.aws/aws-observability/aws-for-fluent-bit:stable"
)

// Platform related settings.
const (
	OSLinux                 = dockerengine.OSLinux
	OSWindows               = dockerengine.OSWindows
	OSWindowsServer2019Core = "windows_server_2019_core"
	OSWindowsServer2019Full = "windows_server_2019_full"
	OSWindowsServer2022Core = "windows_server_2022_core"
	OSWindowsServer2022Full = "windows_server_2022_full"

	ArchAMD64 = dockerengine.ArchAMD64
	ArchX86   = dockerengine.ArchX86
	ArchARM   = dockerengine.ArchARM
	ArchARM64 = dockerengine.ArchARM64

	// Minimum CPU and mem values required for Windows-based tasks.
	MinWindowsTaskCPU    = 1024
	MinWindowsTaskMemory = 2048

	// deployment strategies
	ECSDefaultRollingUpdateStrategy  = "default"
	ECSRecreateRollingUpdateStrategy = "recreate"
)

// Platform related settings.
var (
	defaultPlatform     = platformString(OSLinux, ArchAMD64)
	windowsOSFamilies   = []string{OSWindows, OSWindowsServer2019Core, OSWindowsServer2019Full, OSWindowsServer2022Core, OSWindowsServer2022Full}
	validShortPlatforms = []string{ // All of the os/arch combinations that the PlatformString field may accept.
		dockerengine.PlatformString(OSLinux, ArchAMD64),
		dockerengine.PlatformString(OSLinux, ArchX86),
		dockerengine.PlatformString(OSLinux, ArchARM),
		dockerengine.PlatformString(OSLinux, ArchARM64),
		dockerengine.PlatformString(OSWindows, ArchAMD64),
		dockerengine.PlatformString(OSWindows, ArchX86),
	}
	validAdvancedPlatforms = []PlatformArgs{ // All of the OsFamily/Arch combinations that the PlatformArgs field may accept.
		{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchX86)},
		{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchAMD64)},
		{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchARM)},
		{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchARM64)},
		{OSFamily: aws.String(OSWindows), Arch: aws.String(ArchX86)},
		{OSFamily: aws.String(OSWindows), Arch: aws.String(ArchAMD64)},
		{OSFamily: aws.String(OSWindowsServer2019Core), Arch: aws.String(ArchX86)},
		{OSFamily: aws.String(OSWindowsServer2019Core), Arch: aws.String(ArchAMD64)},
		{OSFamily: aws.String(OSWindowsServer2019Full), Arch: aws.String(ArchX86)},
		{OSFamily: aws.String(OSWindowsServer2019Full), Arch: aws.String(ArchAMD64)},
		{OSFamily: aws.String(OSWindowsServer2022Core), Arch: aws.String(ArchX86)},
		{OSFamily: aws.String(OSWindowsServer2022Core), Arch: aws.String(ArchAMD64)},
		{OSFamily: aws.String(OSWindowsServer2022Full), Arch: aws.String(ArchX86)},
		{OSFamily: aws.String(OSWindowsServer2022Full), Arch: aws.String(ArchAMD64)},
	}
)
// ImageWithHealthcheck represents a container image with health check.
type ImageWithHealthcheck struct {
	Image       Image                `yaml:",inline"`
	HealthCheck ContainerHealthCheck `yaml:"healthcheck"`
}

// ImageWithPortAndHealthcheck represents a container image with an exposed port and health check.
type ImageWithPortAndHealthcheck struct {
	ImageWithPort `yaml:",inline"`
	HealthCheck   ContainerHealthCheck `yaml:"healthcheck"`
}

// AlarmArgs represents specs of CloudWatch alarms for deployment rollbacks.
type AlarmArgs struct {
	CPUUtilization    *float64 `yaml:"cpu_utilization"`
	MemoryUtilization *float64 `yaml:"memory_utilization"`
}

// WorkerAlarmArgs represents specs of CloudWatch alarms for Worker Service deployment rollbacks.
type WorkerAlarmArgs struct {
	AlarmArgs       `yaml:",inline"`
	MessagesDelayed *int `yaml:"messages_delayed"`
}

// DeploymentControllerConfig represents deployment strategies for a service.
type DeploymentControllerConfig struct {
	// Rolling update strategy; see the ECS*RollingUpdateStrategy constants.
	Rolling *string `yaml:"rolling"`
}

// DeploymentConfig represents the deployment config for an ECS service.
type DeploymentConfig struct {
	DeploymentControllerConfig `yaml:",inline"`
	RollbackAlarms             Union[[]string, AlarmArgs] `yaml:"rollback_alarms"`
}

// WorkerDeploymentConfig represents the deployment strategies for a worker service.
type WorkerDeploymentConfig struct {
	DeploymentControllerConfig `yaml:",inline"`
	WorkerRollbackAlarms       Union[[]string, WorkerAlarmArgs] `yaml:"rollback_alarms"`
}
// isEmpty reports whether no deployment configuration is specified.
// A nil receiver is treated as empty.
func (d *DeploymentConfig) isEmpty() bool {
	return d == nil || (d.DeploymentControllerConfig.isEmpty() && d.RollbackAlarms.IsZero())
}
// isEmpty reports whether no rolling deployment strategy is specified.
func (d *DeploymentControllerConfig) isEmpty() bool {
	return d.Rolling == nil
}
// isEmpty reports whether no deployment configuration is specified for the
// worker service. A nil receiver is treated as empty.
func (w *WorkerDeploymentConfig) isEmpty() bool {
	// Delegate to DeploymentControllerConfig.isEmpty for consistency with
	// DeploymentConfig.isEmpty instead of inspecting the Rolling field directly.
	return w == nil || (w.DeploymentControllerConfig.isEmpty() && w.WorkerRollbackAlarms.IsZero())
}
// ExposedPort will hold the port mapping configuration.
type ExposedPort struct {
	ContainerName        string // The name of the container that exposes this port.
	Port                 uint16 // The port number.
	Protocol             string // Either "tcp" or "udp", empty means the default value that the underlying service provides.
	isDefinedByContainer bool   // Defines if the container port is exposed from "image.port" or "sidecar.port". defaults to false.
}

// ImageWithHealthcheckAndOptionalPort represents a container image with an optional exposed port and health check.
type ImageWithHealthcheckAndOptionalPort struct {
	ImageWithOptionalPort `yaml:",inline"`
	HealthCheck           ContainerHealthCheck `yaml:"healthcheck"`
}

// ImageWithOptionalPort represents a container image with an optional exposed port.
type ImageWithOptionalPort struct {
	Image Image   `yaml:",inline"`
	Port  *uint16 `yaml:"port"`
}

// TaskConfig represents the resource boundaries and environment variables for the containers in the task.
type TaskConfig struct {
	CPU            *int                 `yaml:"cpu"`
	Memory         *int                 `yaml:"memory"`
	Platform       PlatformArgsOrString `yaml:"platform,omitempty"`
	Count          Count                `yaml:"count"`
	ExecuteCommand ExecuteCommand       `yaml:"exec"`
	Variables      map[string]Variable  `yaml:"variables"`
	EnvFile        *string              `yaml:"env_file"`
	Secrets        map[string]Secret    `yaml:"secrets"`
	Storage        Storage              `yaml:"storage"`
}
// Variable represents an identifier for the value of an environment variable.
type Variable struct {
	stringOrFromCFN
}

// UnmarshalYAML implements the yaml.Unmarshaler (v3) interface to override the default YAML unmarshalling logic.
func (v *Variable) UnmarshalYAML(value *yaml.Node) error {
	if err := v.stringOrFromCFN.UnmarshalYAML(value); err != nil {
		// Wrap with the field name so users know which manifest section failed.
		return fmt.Errorf(`unmarshal "variables": %w`, err)
	}
	return nil
}

// RequiresImport returns true if the value is imported from an environment.
func (v *Variable) RequiresImport() bool {
	return !v.FromCFN.isEmpty()
}
// Value returns the environment variable's value, regardless of whether it is
// a plain string or a name imported from another CloudFormation stack.
func (v *Variable) Value() string {
	if !v.RequiresImport() {
		return aws.StringValue(v.Plain)
	}
	return aws.StringValue(v.FromCFN.Name)
}
// ContainerPlatform returns the platform for the service.
// Windows family variants are normalized to the generic "windows" OS in the
// returned string; an empty string means no platform was configured.
func (t *TaskConfig) ContainerPlatform() string {
	if t.Platform.IsEmpty() {
		return ""
	}
	if t.IsWindows() {
		return platformString(OSWindows, t.Platform.Arch())
	}
	return platformString(t.Platform.OS(), t.Platform.Arch())
}

// IsWindows returns whether or not the service is building with a Windows OS.
func (t TaskConfig) IsWindows() bool {
	return isWindowsPlatform(t.Platform)
}

// IsARM returns whether or not the service is building with an ARM Arch.
func (t TaskConfig) IsARM() bool {
	return IsArmArch(t.Platform.Arch())
}
// Secret represents an identifier for sensitive data stored in either SSM or SecretsManager.
type Secret struct {
	from               stringOrFromCFN      // SSM Parameter name or ARN to a secret or secret ARN imported from another CloudFormation stack.
	fromSecretsManager secretsManagerSecret // Conveniently fetch from a secretsmanager secret name instead of ARN.
}
// UnmarshalYAML implements the yaml.Unmarshaler (v3) interface to override the default YAML unmarshaling logic.
// It first tries the "secretsmanager" object form, then falls back to the
// plain string / "from_cfn" form.
func (s *Secret) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&s.fromSecretsManager); err != nil {
		// Use errors.As for consistency with the other UnmarshalYAML
		// implementations in this package; a type error only means the value
		// is not the secretsmanager object form, so keep trying.
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !s.fromSecretsManager.IsEmpty() { // Successfully unmarshaled to a secretsmanager name.
		return nil
	}
	if err := value.Decode(&s.from); err != nil { // Otherwise, try decoding the simple form.
		return errors.New(`cannot marshal "secret" field to a string or "secretsmanager" object`)
	}
	return nil
}
// IsSecretsManagerName returns true if the secret refers to the name of a secret stored in SecretsManager.
func (s *Secret) IsSecretsManagerName() bool {
	return !s.fromSecretsManager.IsEmpty()
}

// RequiresImport returns true if the SSM parameter name or secret ARN value is imported from CloudFormation stack.
func (s *Secret) RequiresImport() bool {
	return !s.from.FromCFN.isEmpty()
}

// Value returns the secret value provided by clients.
// Precedence: SecretsManager name, then imported CloudFormation value, then plain string.
func (s *Secret) Value() string {
	if !s.fromSecretsManager.IsEmpty() {
		return aws.StringValue(s.fromSecretsManager.Name)
	} else if s.RequiresImport() {
		return aws.StringValue(s.from.FromCFN.Name)
	}
	return aws.StringValue(s.from.Plain)
}
// secretsManagerSecret represents the name of a secret stored in SecretsManager.
type secretsManagerSecret struct {
	Name *string `yaml:"secretsmanager"`
}

// IsEmpty returns true if all the fields in secretsManagerSecret have the zero value.
func (s secretsManagerSecret) IsEmpty() bool {
	return s.Name == nil
}
// Logging holds configuration for Firelens to route your logs.
type Logging struct {
	Retention      *int                `yaml:"retention"`
	Image          *string             `yaml:"image"`
	Destination    map[string]string   `yaml:"destination,flow"`
	EnableMetadata *bool               `yaml:"enableMetadata"`
	SecretOptions  map[string]Secret   `yaml:"secretOptions"`
	ConfigFile     *string             `yaml:"configFilePath"`
	Variables      map[string]Variable `yaml:"variables"`
	Secrets        map[string]Secret   `yaml:"secrets"`
	EnvFile        *string             `yaml:"env_file"`
}

// IsEmpty returns empty if the struct has all zero members.
// NOTE(review): Retention is not part of this check, so a manifest with only
// "retention" set still counts as empty — confirm this is intentional.
func (lc *Logging) IsEmpty() bool {
	return lc.Image == nil && lc.Destination == nil && lc.EnableMetadata == nil && lc.SecretOptions == nil &&
		lc.ConfigFile == nil && lc.Variables == nil && lc.Secrets == nil && lc.EnvFile == nil
}
// LogImage returns the configured log-router image, falling back to the
// default Fluent Bit image when none is set.
func (lc *Logging) LogImage() *string {
	if lc.Image != nil {
		return lc.Image
	}
	return aws.String(defaultFluentbitImage)
}
// GetEnableMetadata returns the configured value of the EnableMetadata field
// as a string pointer, defaulting to "true" when the field is unset.
func (lc *Logging) GetEnableMetadata() *string {
	if lc.EnableMetadata == nil {
		// Enable ecs log metadata by default.
		return aws.String("true")
	}
	return aws.String(strconv.FormatBool(*lc.EnableMetadata))
}
// SidecarConfig represents the configurable options for setting up a sidecar container.
type SidecarConfig struct {
	Port          *string                              `yaml:"port"`
	Image         Union[*string, ImageLocationOrBuild] `yaml:"image"`
	Essential     *bool                                `yaml:"essential"`
	CredsParam    *string                              `yaml:"credentialsParameter"`
	Variables     map[string]Variable                  `yaml:"variables"`
	EnvFile       *string                              `yaml:"env_file"`
	Secrets       map[string]Secret                    `yaml:"secrets"`
	MountPoints   []SidecarMountPoint                  `yaml:"mount_points"`
	DockerLabels  map[string]string                    `yaml:"labels"`
	DependsOn     DependsOn                            `yaml:"depends_on"`
	HealthCheck   ContainerHealthCheck                 `yaml:"healthcheck"`
	ImageOverride `yaml:",inline"`
}

// ImageURI returns the location of the image if one is set.
// If the image needs to be built, return "" and false.
func (cfg *SidecarConfig) ImageURI() (string, bool) {
	if cfg.Image.Basic != nil {
		// The image was provided directly as a string.
		return aws.StringValue(cfg.Image.Basic), true
	}
	if cfg.Image.Advanced.Location != nil {
		// The image was provided through the "image.location" field.
		return aws.StringValue(cfg.Image.Advanced.Location), true
	}
	return "", false
}

// OverrideRule holds the manifest overriding rule for CloudFormation template.
type OverrideRule struct {
	Path  string    `yaml:"path"`
	Value yaml.Node `yaml:"value"`
}
// ExecuteCommand is a custom type which supports unmarshaling yaml which
// can either be of type bool or type ExecuteCommandConfig.
type ExecuteCommand struct {
	Enable *bool                // Set when the YAML value is a plain boolean.
	Config ExecuteCommandConfig // Set when the YAML value is the advanced mapping form.
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the ExecuteCommand
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (e *ExecuteCommand) UnmarshalYAML(value *yaml.Node) error {
	if err := value.Decode(&e.Config); err != nil {
		// Use errors.As for consistency with the other UnmarshalYAML
		// implementations in this package; a type error only means the value
		// is not the advanced config form, so fall through to the bool form.
		var yamlTypeErr *yaml.TypeError
		if !errors.As(err, &yamlTypeErr) {
			return err
		}
	}
	if !e.Config.IsEmpty() {
		// Unmarshaled successfully to e.Config; nothing more to do.
		return nil
	}
	if err := value.Decode(&e.Enable); err != nil {
		return errUnmarshalExec
	}
	return nil
}
// ExecuteCommandConfig represents the configuration for ECS Execute Command.
type ExecuteCommandConfig struct {
	Enable *bool `yaml:"enable"`
}

// IsEmpty returns whether ExecuteCommandConfig is empty.
func (e ExecuteCommandConfig) IsEmpty() bool {
	return e.Enable == nil
}
// ContainerHealthCheck holds the configuration to determine if the service container is healthy.
// See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html
type ContainerHealthCheck struct {
	Command     []string       `yaml:"command"`
	Interval    *time.Duration `yaml:"interval"`
	Retries     *int           `yaml:"retries"`
	Timeout     *time.Duration `yaml:"timeout"`
	StartPeriod *time.Duration `yaml:"start_period"`
}

// NewDefaultContainerHealthCheck returns container health check configuration
// that's identical to a load balanced web service's defaults.
func NewDefaultContainerHealthCheck() *ContainerHealthCheck {
	return &ContainerHealthCheck{
		Command:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    durationp(10 * time.Second),
		Retries:     aws.Int(2),
		Timeout:     durationp(5 * time.Second),
		StartPeriod: durationp(0 * time.Second),
	}
}

// IsEmpty checks if the health check is empty.
func (hc ContainerHealthCheck) IsEmpty() bool {
	return hc.Command == nil && hc.Interval == nil && hc.Retries == nil && hc.Timeout == nil && hc.StartPeriod == nil
}

// ApplyIfNotSet changes the healthcheck's fields only if they were not set and the other healthcheck has them set.
func (hc *ContainerHealthCheck) ApplyIfNotSet(other *ContainerHealthCheck) {
	if hc.Command == nil && other.Command != nil {
		hc.Command = other.Command
	}
	if hc.Interval == nil && other.Interval != nil {
		hc.Interval = other.Interval
	}
	if hc.Retries == nil && other.Retries != nil {
		hc.Retries = other.Retries
	}
	if hc.Timeout == nil && other.Timeout != nil {
		hc.Timeout = other.Timeout
	}
	if hc.StartPeriod == nil && other.StartPeriod != nil {
		hc.StartPeriod = other.StartPeriod
	}
}
// envFiles collects the env file path for each container in the task, keyed by
// container name. Containers without an env file map to the empty string.
func envFiles(name *string, tc TaskConfig, lc Logging, sc map[string]*SidecarConfig) map[string]string {
	envFiles := make(map[string]string)
	// Grab the workload container's env file, if present.
	// NOTE(review): a nil name produces the "" key — presumably callers always
	// pass a non-nil workload name; confirm.
	envFiles[aws.StringValue(name)] = aws.StringValue(tc.EnvFile)
	// Grab sidecar env files, if present.
	for sidecarName, sidecar := range sc {
		envFiles[sidecarName] = aws.StringValue(sidecar.EnvFile)
	}
	// If the Firelens Sidecar Pattern has an env file specified, get it as well.
	envFiles[FirelensContainerName] = aws.StringValue(lc.EnvFile)
	return envFiles
}
// buildArgs adds Docker build arguments for every sidecar that has no image
// URI (and therefore must be built) to the given buildArgs map and returns it.
// The error result is currently always nil.
func buildArgs(contextDir string, buildArgs map[string]*DockerBuildArgs, sc map[string]*SidecarConfig) (map[string]*DockerBuildArgs, error) {
	for name, config := range sc {
		if _, ok := config.ImageURI(); !ok {
			buildArgs[name] = config.Image.Advanced.BuildConfig(contextDir)
		}
	}
	return buildArgs, nil
}
| 441 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestExec_UnmarshalYAML(t *testing.T) {
	// Verifies that the "exec" field accepts both a plain boolean and the
	// advanced config form, and that unknown fields surface errUnmarshalExec.
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct ExecuteCommand
		wantedError  error
	}{
		"use default with empty value": {
			inContent: []byte(`exec:
count: 1`),
			wantedStruct: ExecuteCommand{
				Enable: aws.Bool(false),
			},
		},
		"use default without any input": {
			inContent: []byte(`count: 1`),
			wantedStruct: ExecuteCommand{
				Enable: aws.Bool(false),
			},
		},
		"simple enable": {
			inContent: []byte(`exec: true`),
			wantedStruct: ExecuteCommand{
				Enable: aws.Bool(true),
			},
		},
		"with config": {
			inContent: []byte(`exec:
  enable: true`),
			wantedStruct: ExecuteCommand{
				Enable: aws.Bool(false),
				Config: ExecuteCommandConfig{
					Enable: aws.Bool(true),
				},
			},
		},
		"Error if unmarshalable": {
			inContent: []byte(`exec:
  badfield: OH NOES
  otherbadfield: DOUBLE BAD`),
			wantedError: errUnmarshalExec,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			b := TaskConfig{
				ExecuteCommand: ExecuteCommand{
					Enable: aws.Bool(false),
				},
			}
			err := yaml.Unmarshal(tc.inContent, &b)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				// check memberwise dereferenced pointer equality
				require.Equal(t, tc.wantedStruct.Enable, b.ExecuteCommand.Enable)
				require.Equal(t, tc.wantedStruct.Config, b.ExecuteCommand.Config)
			}
		})
	}
}
func TestVariable_UnmarshalYAML(t *testing.T) {
	// Covers both accepted forms for "variables": a plain string value and
	// the "from_cfn" import form, plus the unmarshalable error case.
	type mockParentField struct {
		Variables map[string]Variable `yaml:"variables"`
	}
	testCases := map[string]struct {
		in          []byte
		wanted      mockParentField
		wantedError error
	}{
		"unmarshal plain string": {
			in: []byte(`
variables:
  LOG_LEVEL: DEBUG
`),
			wanted: mockParentField{
				Variables: map[string]Variable{
					"LOG_LEVEL": {
						stringOrFromCFN{
							Plain: stringP("DEBUG"),
						},
					},
				},
			},
		},
		"unmarshal import name": {
			in: []byte(`
variables:
  DB_NAME:
    from_cfn: MyUserDB
`),
			wanted: mockParentField{
				Variables: map[string]Variable{
					"DB_NAME": {
						stringOrFromCFN{
							FromCFN: fromCFN{
								Name: stringP("MyUserDB"),
							},
						},
					},
				},
			},
		},
		"nothing to unmarshal": {
			in: []byte(`other_field: yo`),
		},
		"fail to unmarshal": {
			in: []byte(`
variables:
  erroneous:
    big_mistake: being made`),
			wantedError: errors.New(`unmarshal "variables": cannot unmarshal field to a string or into a map`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var s mockParentField
			err := yaml.Unmarshal(tc.in, &s)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wanted, s)
			}
		})
	}
}
func TestVariable_RequiresImport(t *testing.T) {
	// A variable requires import only when it is specified via "from_cfn".
	testCases := map[string]struct {
		in     Variable
		wanted bool
	}{
		"requires import": {
			in: Variable{
				stringOrFromCFN{
					FromCFN: fromCFN{
						Name: stringP("prod-MyDB"),
					},
				},
			},
			wanted: true,
		},
		"does not require import if it is a plain value": {
			in: Variable{
				stringOrFromCFN{
					Plain: stringP("plain"),
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.RequiresImport())
		})
	}
}

func TestVariable_Value(t *testing.T) {
	// Value returns the imported name or the plain string, whichever is set.
	testCases := map[string]struct {
		in     Variable
		wanted string
	}{
		"requires import": {
			in: Variable{
				stringOrFromCFN{
					FromCFN: fromCFN{
						Name: stringP("prod-MyDB"),
					},
				},
			},
			wanted: "prod-MyDB",
		},
		"does not require import if it is a plain value": {
			in: Variable{
				stringOrFromCFN{
					Plain: stringP("plain"),
				},
			},
			wanted: "plain",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.Value())
		})
	}
}
func TestSecret_UnmarshalYAML(t *testing.T) {
	// Covers the three accepted secret forms: plain string (SSM name or ARN),
	// "from_cfn" import, and "secretsmanager" name, plus the error case.
	testCases := map[string]struct {
		in        string
		wanted    Secret
		wantedErr error
	}{
		"should return an error if the input cannot be unmarshal to a Secret": {
			in:        "key: value",
			wantedErr: errors.New(`cannot marshal "secret" field to a string or "secretsmanager" object`),
		},
		"should be able to unmarshal a plain SSM parameter name": {
			in: "/github/token",
			wanted: Secret{
				from: stringOrFromCFN{
					Plain: aws.String("/github/token"),
				},
			},
		},
		"should be able to unmarshal an imported SSM parameter name from other cloudformation stack": {
			in: `from_cfn: "stack-SSMGHTokenName"`,
			wanted: Secret{
				from: stringOrFromCFN{
					FromCFN: fromCFN{
						Name: aws.String("stack-SSMGHTokenName"),
					},
				},
			},
		},
		"should be able to unmarshal a plain SecretsManager ARN": {
			in: "arn:aws:secretsmanager:us-west-2:111122223333:secret:aes128-1a2b3c",
			wanted: Secret{
				from: stringOrFromCFN{
					Plain: aws.String("arn:aws:secretsmanager:us-west-2:111122223333:secret:aes128-1a2b3c"),
				},
			},
		},
		"should be able to unmarshal a SecretsManager name": {
			in:     "secretsmanager: aes128-1a2b3c",
			wanted: Secret{fromSecretsManager: secretsManagerSecret{Name: aws.String("aes128-1a2b3c")}},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			actual := Secret{}

			// WHEN
			err := yaml.Unmarshal([]byte(tc.in), &actual)

			// THEN
			if tc.wantedErr == nil {
				require.NoError(t, err)
				require.Equal(t, tc.wanted, actual)
			} else {
				require.EqualError(t, err, tc.wantedErr.Error())
			}
		})
	}
}
func TestSecret_IsSecretsManagerName(t *testing.T) {
	// True only for the "secretsmanager" name form.
	testCases := map[string]struct {
		in     Secret
		wanted bool
	}{
		"should return false if the secret refers to an SSM parameter": {
			in: Secret{
				from: stringOrFromCFN{
					Plain: aws.String("/github/token"),
				},
			},
		},
		"should return true if the secret refers to a SecretsManager secret name": {
			in:     Secret{fromSecretsManager: secretsManagerSecret{Name: aws.String("aes128-1a2b3c")}},
			wanted: true,
		},
		"should return false if the secret is imported": {
			in: Secret{
				from: stringOrFromCFN{
					FromCFN: fromCFN{aws.String("stack-SSMGHTokenName")},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.IsSecretsManagerName())
		})
	}
}

func TestSSMOrSecretARN_RequiresImport(t *testing.T) {
	// True only for the "from_cfn" import form.
	testCases := map[string]struct {
		in     Secret
		wanted bool
	}{
		"should return false if secret is plain": {
			in: Secret{
				from: stringOrFromCFN{
					Plain: aws.String("aes128-1a2b3c"),
				},
			},
		},
		"should return true if secret is imported": {
			in: Secret{
				from: stringOrFromCFN{
					FromCFN: fromCFN{
						Name: aws.String("stack-SSMGHTokenName"),
					},
				},
			},
			wanted: true,
		},
		"should return false if secret is from secrets manager": {
			in: Secret{
				fromSecretsManager: secretsManagerSecret{
					Name: aws.String("aes128-1a2b3c"),
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.RequiresImport())
		})
	}
}
func TestSecret_Value(t *testing.T) {
	// Value resolves to the SSM name/ARN, the imported name, or the
	// SecretsManager name depending on how the secret was specified.
	testCases := map[string]struct {
		in     Secret
		wanted string
	}{
		"should return the SSM parameter name if the secret is just a string": {
			in: Secret{
				from: stringOrFromCFN{
					Plain: aws.String("/github/token"),
				},
			},
			wanted: "/github/token",
		},
		"should return the imported name of the SSM parameter or secretARN": {
			in: Secret{
				from: stringOrFromCFN{
					FromCFN: fromCFN{
						Name: aws.String("stack-SSMGHTokenName"),
					},
				},
			},
			wanted: "stack-SSMGHTokenName",
		},
		"should return the SecretsManager secret name when the secret is from SecretsManager": {
			in:     Secret{fromSecretsManager: secretsManagerSecret{Name: aws.String("aes128-1a2b3c")}},
			wanted: "aes128-1a2b3c",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.Value())
		})
	}
}

func TestSecretsManagerSecret_IsEmpty(t *testing.T) {
	// Empty only when no name is set.
	testCases := map[string]struct {
		in     secretsManagerSecret
		wanted bool
	}{
		"should return true on empty struct": {in: secretsManagerSecret{}, wanted: true},
		"should return false if the name is provided": {
			in: secretsManagerSecret{
				Name: aws.String("aes128-1a2b3c"),
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.IsEmpty())
		})
	}
}
func TestLogging_IsEmpty(t *testing.T) {
	// Logging is empty only when no Firelens-related field is set.
	testCases := map[string]struct {
		in     Logging
		wanted bool
	}{
		"empty logging": {
			in:     Logging{},
			wanted: true,
		},
		"non empty logging": {
			in: Logging{
				SecretOptions: map[string]Secret{
					"secret1": {
						from: stringOrFromCFN{
							Plain: aws.String("value1"),
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			got := tc.in.IsEmpty()

			// THEN
			require.Equal(t, tc.wanted, got)
		})
	}
}

func TestLogging_LogImage(t *testing.T) {
	// Falls back to the default Fluent Bit image when none is configured.
	testCases := map[string]struct {
		inputImage  *string
		wantedImage *string
	}{
		"Image specified": {
			inputImage:  aws.String("nginx:why-on-earth"),
			wantedImage: aws.String("nginx:why-on-earth"),
		},
		"no image specified": {
			inputImage:  nil,
			wantedImage: aws.String(defaultFluentbitImage),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			l := Logging{
				Image: tc.inputImage,
			}
			got := l.LogImage()
			require.Equal(t, tc.wantedImage, got)
		})
	}
}
func TestLogging_GetEnableMetadata(t *testing.T) {
	// Defaults to "true" when the field is unset.
	testCases := map[string]struct {
		enable *bool
		wanted *string
	}{
		"specified true": {
			enable: aws.Bool(true),
			wanted: aws.String("true"),
		},
		"specified false": {
			enable: aws.Bool(false),
			wanted: aws.String("false"),
		},
		"not specified": {
			enable: nil,
			wanted: aws.String("true"),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			l := Logging{
				EnableMetadata: tc.enable,
			}
			got := l.GetEnableMetadata()
			require.Equal(t, tc.wanted, got)
		})
	}
}

func Test_ImageURI(t *testing.T) {
	// A sidecar image URI may come from the basic string form or from
	// "image.location"; anything else means the image must be built.
	testCases := map[string]struct {
		in        SidecarConfig
		wantedURI string
		wantedOk  bool
	}{
		"empty SidecarConfig": {},
		"should return URI if provided directly through `image` ": {
			in: SidecarConfig{
				Image: Union[*string, ImageLocationOrBuild]{
					Basic: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
				},
			},
			wantedURI: "123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon",
			wantedOk:  true,
		},
		"should return the URI if provided through `image.location` field": {
			in: SidecarConfig{
				Image: Union[*string, ImageLocationOrBuild]{
					Advanced: ImageLocationOrBuild{
						Location: aws.String("123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon"),
					},
				},
			},
			wantedURI: "123456789012.dkr.ecr.us-east-2.amazonaws.com/xray-daemon",
			wantedOk:  true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// WHEN
			uri, ok := tc.in.ImageURI()

			// THEN
			require.Equal(t, tc.wantedURI, uri)
			require.Equal(t, tc.wantedOk, ok)
		})
	}
}
| 521 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"path/filepath"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/manifest/manifestinfo"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestImage_UnmarshalYAML verifies that an Image section rejects manifests
// that set both `build` and `location` (they are mutually exclusive) and
// accepts a manifest that sets only one of them.
func TestImage_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent   []byte
		wantedError error
	}{
		"error if both build and location are set": {
			// Both keys at the same YAML level triggers the mutual-exclusion check.
			inContent: []byte(`build: mockBuild
location: mockLocation`),
			wantedError: fmt.Errorf(`must specify one of "build" and "location"`),
		},
		"success": {
			inContent: []byte(`location: mockLocation`),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			i := Image{}
			err := yaml.Unmarshal(tc.inContent, &i)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, "mockLocation", aws.StringValue(i.Location))
			}
		})
	}
}
// TestEntryPointOverride_UnmarshalYAML verifies that an `entrypoint` field is
// decoded either as a shell-style string or as a string slice, and that a
// value fitting neither shape surfaces errUnmarshalEntryPoint.
func TestEntryPointOverride_UnmarshalYAML(t *testing.T) {
	for name, tc := range map[string]struct {
		inContent    []byte
		wantedStruct EntryPointOverride
		wantedError  error
	}{
		"Entrypoint specified in string": {
			inContent:    []byte(`entrypoint: echo hello`),
			wantedStruct: EntryPointOverride{String: aws.String("echo hello")},
		},
		"Entrypoint specified in slice of strings": {
			inContent:    []byte(`entrypoint: ["/bin/sh", "-c"]`),
			wantedStruct: EntryPointOverride{StringSlice: []string{"/bin/sh", "-c"}},
		},
		"Error if unmarshalable": {
			inContent:   []byte(`entrypoint: {"/bin/sh", "-c"}`),
			wantedError: errUnmarshalEntryPoint,
		},
	} {
		t.Run(name, func(t *testing.T) {
			// Seed with a non-zero value so the test proves unmarshaling
			// actually overwrote the previous entrypoint.
			override := ImageOverride{
				EntryPoint: EntryPointOverride{String: aws.String("wrong")},
			}
			err := yaml.Unmarshal(tc.inContent, &override)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
				return
			}
			require.NoError(t, err)
			// Compare member-wise so pointer values are dereferenced by require.
			require.Equal(t, tc.wantedStruct.String, override.EntryPoint.String)
			require.Equal(t, tc.wantedStruct.StringSlice, override.EntryPoint.StringSlice)
		})
	}
}
// TestEntryPointOverride_ToStringSlice verifies that an entrypoint override is
// converted to a string slice: shell-style strings are tokenized (respecting
// quotes), slices pass through unchanged, and an empty override yields nil.
func TestEntryPointOverride_ToStringSlice(t *testing.T) {
	for name, tc := range map[string]struct {
		in     EntryPointOverride
		wanted []string
	}{
		"Both fields are empty": {
			in:     EntryPointOverride{},
			wanted: nil,
		},
		"Given a string": {
			in:     EntryPointOverride{String: aws.String(`read "some command"`)},
			wanted: []string{"read", "some command"},
		},
		"Given a string slice": {
			in:     EntryPointOverride{StringSlice: []string{"/bin/sh", "-c"}},
			wanted: []string{"/bin/sh", "-c"},
		},
	} {
		t.Run(name, func(t *testing.T) {
			got, err := tc.in.ToStringSlice()
			require.NoError(t, err)
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestCommandOverride_UnmarshalYAML verifies that a `command` field is decoded
// either as a shell-style string or as a string slice, and that any other
// shape surfaces errUnmarshalCommand.
func TestCommandOverride_UnmarshalYAML(t *testing.T) {
	for name, tc := range map[string]struct {
		inContent    []byte
		wantedStruct CommandOverride
		wantedError  error
	}{
		"Entrypoint specified in string": {
			inContent:    []byte(`command: echo hello`),
			wantedStruct: CommandOverride{String: aws.String("echo hello")},
		},
		"Entrypoint specified in slice of strings": {
			inContent:    []byte(`command: ["--version"]`),
			wantedStruct: CommandOverride{StringSlice: []string{"--version"}},
		},
		"Error if unmarshalable": {
			inContent:   []byte(`command: {-c}`),
			wantedError: errUnmarshalCommand,
		},
	} {
		t.Run(name, func(t *testing.T) {
			// Seed with a non-zero value so the test proves unmarshaling
			// actually overwrote the previous command.
			override := ImageOverride{
				Command: CommandOverride{String: aws.String("wrong")},
			}
			err := yaml.Unmarshal(tc.inContent, &override)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
				return
			}
			require.NoError(t, err)
			// Compare member-wise so pointer values are dereferenced by require.
			require.Equal(t, tc.wantedStruct.String, override.Command.String)
			require.Equal(t, tc.wantedStruct.StringSlice, override.Command.StringSlice)
		})
	}
}
// TestCommandOverride_ToStringSlice verifies that a command override is
// converted to a string slice: shell-style strings are tokenized (respecting
// quotes), slices pass through unchanged, and an empty override yields nil.
func TestCommandOverride_ToStringSlice(t *testing.T) {
	for name, tc := range map[string]struct {
		in     CommandOverride
		wanted []string
	}{
		"Both fields are empty": {
			in:     CommandOverride{},
			wanted: nil,
		},
		"Given a string": {
			in:     CommandOverride{String: aws.String(`-c read "some command"`)},
			wanted: []string{"-c", "read", "some command"},
		},
		"Given a string slice": {
			in:     CommandOverride{StringSlice: []string{"-c", "read", "some", "command"}},
			wanted: []string{"-c", "read", "some", "command"},
		},
	} {
		t.Run(name, func(t *testing.T) {
			got, err := tc.in.ToStringSlice()
			require.NoError(t, err)
			require.Equal(t, tc.wanted, got)
		})
	}
}
// TestBuildArgs_UnmarshalYAML verifies that the `build` field accepts both the
// legacy single-string form (a path to a Dockerfile) and the expanded map form
// (dockerfile, context, args, target, cache_from), and that an unrecognized
// map shape surfaces errUnmarshalBuildOpts.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant; it was reconstructed to be valid YAML — confirm against the
// original file.
func TestBuildArgs_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct BuildArgsOrString
		wantedError  error
	}{
		"legacy case: simple build string": {
			inContent: []byte(`build: ./Dockerfile`),
			wantedStruct: BuildArgsOrString{
				BuildString: aws.String("./Dockerfile"),
			},
		},
		"Dockerfile specified in build opts": {
			inContent: []byte(`build:
  dockerfile: path/to/Dockerfile
`),
			wantedStruct: BuildArgsOrString{
				BuildArgs: DockerBuildArgs{
					Dockerfile: aws.String("path/to/Dockerfile"),
				},
				BuildString: nil,
			},
		},
		"Dockerfile context, and args specified in build opts": {
			inContent: []byte(`build:
  dockerfile: path/to/Dockerfile
  args:
    arg1: value1
    bestdog: bowie
  context: path/to/source`),
			wantedStruct: BuildArgsOrString{
				BuildArgs: DockerBuildArgs{
					Dockerfile: aws.String("path/to/Dockerfile"),
					Context:    aws.String("path/to/source"),
					Args: map[string]string{
						"arg1":    "value1",
						"bestdog": "bowie",
					},
				},
				BuildString: nil,
			},
		},
		"Dockerfile with cache from and target build opts": {
			inContent: []byte(`build:
  cache_from:
    - foo/bar:latest
    - foo/bar/baz:1.2.3
  target: foobar`),
			wantedStruct: BuildArgsOrString{
				BuildArgs: DockerBuildArgs{
					Target: aws.String("foobar"),
					CacheFrom: []string{
						"foo/bar:latest",
						"foo/bar/baz:1.2.3",
					},
				},
				BuildString: nil,
			},
		},
		"Error if unmarshalable": {
			inContent: []byte(`build:
  badfield: OH NOES
  otherbadfield: DOUBLE BAD`),
			wantedError: errUnmarshalBuildOpts,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// Seed Build with a non-zero value so the test proves that
			// unmarshaling overwrote the previous value.
			b := Image{
				ImageLocationOrBuild: ImageLocationOrBuild{
					Build: BuildArgsOrString{
						BuildString: aws.String("./default"),
					},
				},
			}
			err := yaml.Unmarshal(tc.inContent, &b)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				// check memberwise dereferenced pointer equality
				require.Equal(t, tc.wantedStruct.BuildString, b.Build.BuildString)
				require.Equal(t, tc.wantedStruct.BuildArgs.Context, b.Build.BuildArgs.Context)
				require.Equal(t, tc.wantedStruct.BuildArgs.Dockerfile, b.Build.BuildArgs.Dockerfile)
				require.Equal(t, tc.wantedStruct.BuildArgs.Args, b.Build.BuildArgs.Args)
				require.Equal(t, tc.wantedStruct.BuildArgs.Target, b.Build.BuildArgs.Target)
				require.Equal(t, tc.wantedStruct.BuildArgs.CacheFrom, b.Build.BuildArgs.CacheFrom)
			}
		})
	}
}
// TestPlatformArgsOrString_UnmarshalYAML verifies that `platform` rejects
// manifests that mix the 'os/arch' string form with the osfamily/architecture
// map form, and surfaces errUnmarshalPlatformOpts for an unrecognized map.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant (the first case relies on indented keys after a scalar to
// trigger the parse error); it was reconstructed — confirm against the
// original file.
func TestPlatformArgsOrString_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct PlatformArgsOrString
		wantedError  error
	}{
		"returns error if both string and args specified": {
			inContent: []byte(`platform: linux/amd64
  osfamily: linux
  architecture: amd64`),
			wantedError: errors.New("yaml: line 2: mapping values are not allowed in this context"),
		},
		"error if unmarshalable": {
			inContent: []byte(`platform:
  ohess: linus
  archie: leg64`),
			wantedError: errUnmarshalPlatformOpts,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			p := TaskConfig{}
			err := yaml.Unmarshal(tc.inContent, &p)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedStruct.PlatformString, p.Platform.PlatformString)
				require.Equal(t, tc.wantedStruct.PlatformArgs.OSFamily, p.Platform.PlatformArgs.OSFamily)
				require.Equal(t, tc.wantedStruct.PlatformArgs.Arch, p.Platform.PlatformArgs.Arch)
			}
		})
	}
}
// TestServiceConnectBoolOrArgs_ServiceConnectEnabled verifies that Service
// Connect reports enabled when switched on via the boolean shorthand or when
// any advanced arguments are set, and disabled by default.
func TestServiceConnectBoolOrArgs_ServiceConnectEnabled(t *testing.T) {
	for name, tc := range map[string]struct {
		mft    *ServiceConnectBoolOrArgs
		wanted bool
	}{
		"disabled by default": {
			mft:    &ServiceConnectBoolOrArgs{},
			wanted: false,
		},
		"set by bool": {
			mft:    &ServiceConnectBoolOrArgs{EnableServiceConnect: aws.Bool(true)},
			wanted: true,
		},
		"set by args": {
			mft: &ServiceConnectBoolOrArgs{
				ServiceConnectArgs: ServiceConnectArgs{Alias: aws.String("api")},
			},
			wanted: true,
		},
	} {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.mft.Enabled())
		})
	}
}
// TestServiceConnect_UnmarshalYAML verifies that `connect` decodes from either
// a boolean or a map of advanced Service Connect arguments, and rejects
// manifests that mix or malform the two shapes.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant; it was reconstructed to be valid YAML — confirm against the
// original file.
func TestServiceConnect_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct ServiceConnectBoolOrArgs
		wantedError  error
	}{
		"returns error if both bool and args specified": {
			inContent: []byte(`connect: true
  alias: api`),
			wantedError: errors.New("yaml: line 2: mapping values are not allowed in this context"),
		},
		"error if unmarshalable": {
			inContent: []byte(`connect:
  ohess: linus
  archie: leg64`),
			wantedError: errUnmarshalServiceConnectOpts,
		},
		"success": {
			inContent: []byte(`connect:
  alias: api`),
			wantedStruct: ServiceConnectBoolOrArgs{
				ServiceConnectArgs: ServiceConnectArgs{
					Alias: aws.String("api"),
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			v := NetworkConfig{}
			err := yaml.Unmarshal(tc.inContent, &v)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedStruct, v.Connect)
			}
		})
	}
}
// TestPlacementArgOrString_UnmarshalYAML verifies that `placement` decodes
// from either a placement string (e.g. 'private') or the advanced args form
// (e.g. `subnets`), and rejects manifests that mix or malform the two shapes.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant; it was reconstructed to be valid YAML — confirm against the
// original file.
func TestPlacementArgOrString_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct PlacementArgOrString
		wantedError  error
	}{
		"returns error if both string and args specified": {
			inContent: []byte(`placement: private
  subnets: ["id1", "id2"]`),
			wantedError: errors.New("yaml: line 2: mapping values are not allowed in this context"),
		},
		"error if unmarshalable": {
			inContent: []byte(`placement:
  ohess: linus
  archie: leg64`),
			wantedError: errUnmarshalPlacementOpts,
		},
		"success": {
			inContent: []byte(`placement:
  subnets: ["id1", "id2"]`),
			wantedStruct: PlacementArgOrString{
				PlacementArgs: PlacementArgs{
					Subnets: SubnetListOrArgs{
						IDs: []string{"id1", "id2"},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			v := vpcConfig{}
			err := yaml.Unmarshal(tc.inContent, &v)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedStruct.PlacementString, v.Placement.PlacementString)
				require.Equal(t, tc.wantedStruct.PlacementArgs.Subnets, v.Placement.PlacementArgs.Subnets)
			}
		})
	}
}
// TestSubnetListOrArgs_UnmarshalYAML verifies that `subnets` decodes from
// either a plain list of subnet IDs or the advanced args form (`from_tags`),
// and rejects manifests that mix or malform the two shapes.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant; it was reconstructed to be valid YAML — confirm against the
// original file.
func TestSubnetListOrArgs_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct SubnetListOrArgs
		wantedError  error
	}{
		"returns error if both string slice and args specified": {
			inContent: []byte(`subnets: ["id1", "id2"]
  from_tags:
    - foo: bar`),
			wantedError: errors.New("yaml: line 1: did not find expected key"),
		},
		"error if unmarshalable": {
			inContent: []byte(`subnets:
  ohess: linus
  archie: leg64`),
			wantedError: errUnmarshalSubnetsOpts,
		},
		"success with string slice": {
			inContent: []byte(`subnets: ["id1", "id2"]`),
			wantedStruct: SubnetListOrArgs{
				IDs: []string{"id1", "id2"},
			},
		},
		"success with args": {
			inContent: []byte(`subnets:
  from_tags:
    foo: bar`),
			wantedStruct: SubnetListOrArgs{
				SubnetArgs: SubnetArgs{
					FromTags: map[string]StringSliceOrString{
						"foo": {String: aws.String("bar")},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			v := PlacementArgs{}
			err := yaml.Unmarshal(tc.inContent, &v)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedStruct.IDs, v.Subnets.IDs)
				require.Equal(t, tc.wantedStruct.FromTags, v.Subnets.FromTags)
			}
		})
	}
}
// TestPlatformArgsOrString_OS verifies that the operating system is extracted
// from both the 'os/arch' string form and the osfamily/arch map form, and that
// the result is always lowercased.
func TestPlatformArgsOrString_OS(t *testing.T) {
	linux := PlatformString("linux/amd64")
	// windows builds the map-form platform for a given Windows OS family.
	windows := func(family string) *PlatformArgsOrString {
		return &PlatformArgsOrString{
			PlatformArgs: PlatformArgs{
				OSFamily: aws.String(family),
				Arch:     aws.String("x86_64"),
			},
		}
	}
	for name, tc := range map[string]struct {
		in     *PlatformArgsOrString
		wanted string
	}{
		"should return os when platform is of string format 'os/arch'": {
			in:     &PlatformArgsOrString{PlatformString: &linux},
			wanted: "linux",
		},
		"should return OS when platform is a map 2019 core": {
			in:     windows("windows_server_2019_core"),
			wanted: "windows_server_2019_core",
		},
		"should return lowercase OS 2019 core": {
			in:     windows("wINdows_sERver_2019_cORe"),
			wanted: "windows_server_2019_core",
		},
		"should return OS when platform is a map 2019 full": {
			in:     windows("windows_server_2019_full"),
			wanted: "windows_server_2019_full",
		},
		"should return lowercase OS 2019 full": {
			in:     windows("wINdows_sERver_2019_fUll"),
			wanted: "windows_server_2019_full",
		},
		"should return OS when platform is a map 2022 core": {
			in:     windows("windows_server_2022_core"),
			wanted: "windows_server_2022_core",
		},
		"should return lowercase OS 2022 core": {
			in:     windows("wINdows_sERver_2022_cORe"),
			wanted: "windows_server_2022_core",
		},
		"should return OS when platform is a map 2022 full": {
			in:     windows("windows_server_2022_full"),
			wanted: "windows_server_2022_full",
		},
		"should return lowercase OS 2022 full": {
			in:     windows("wINdows_sERver_2022_fUll"),
			wanted: "windows_server_2022_full",
		},
	} {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.OS())
		})
	}
}
// TestPlatformArgsOrString_Arch verifies that the architecture is extracted
// from both the 'os/arch' string form and the osfamily/arch map form, and that
// the result is lowercased.
func TestPlatformArgsOrString_Arch(t *testing.T) {
	// windows builds the map-form platform for a given Windows OS family.
	windows := func(family string) *PlatformArgsOrString {
		return &PlatformArgsOrString{
			PlatformArgs: PlatformArgs{
				OSFamily: aws.String(family),
				Arch:     aws.String("x86_64"),
			},
		}
	}
	for name, tc := range map[string]struct {
		in     *PlatformArgsOrString
		wanted string
	}{
		"should return arch when platform is of string format 'os/arch'": {
			in:     &PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/arm"))},
			wanted: "arm",
		},
		"should return arch when platform is a map 2019 core": {
			in:     windows("windows_server_2019_core"),
			wanted: "x86_64",
		},
		"should return arch when platform is a map 2019 full": {
			in:     windows("windows_server_2019_full"),
			wanted: "x86_64",
		},
		"should return arch when platform is a map 2022 core": {
			in:     windows("windows_server_2022_core"),
			wanted: "x86_64",
		},
		"should return arch when platform is a map 2022 full": {
			in:     windows("windows_server_2022_full"),
			wanted: "x86_64",
		},
		"should return lowercase arch": {
			in:     &PlatformArgsOrString{PlatformString: (*PlatformString)(aws.String("windows/aMd64"))},
			wanted: "amd64",
		},
	} {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.Arch())
		})
	}
}
// TestRedirectPlatform verifies the platform redirection rules: the default
// linux/amd64 platform needs no redirect, Windows is rejected for App Runner
// services, ARM is redirected to x86_64, and any other non-default platform is
// normalized to the 'os/x86_64' form.
func TestRedirectPlatform(t *testing.T) {
	for name, tc := range map[string]struct {
		inOS           string
		inArch         string
		inWorkloadType string
		wantedPlatform string
		wantedError    error
	}{
		"returns nil if default platform": {
			inOS:           "linux",
			inArch:         "amd64",
			inWorkloadType: manifestinfo.LoadBalancedWebServiceType,
		},
		"returns error if App Runner + Windows": {
			inOS:           "windows",
			inArch:         "amd64",
			inWorkloadType: manifestinfo.RequestDrivenWebServiceType,
			wantedError:    errors.New("Windows is not supported for App Runner services"),
		},
		"targets x86_64 if ARM architecture passed in": {
			inOS:           "linux",
			inArch:         "arm64",
			wantedPlatform: "linux/x86_64",
		},
		"returns non-default os as is": {
			inOS:           "windows",
			inArch:         "amd64",
			wantedPlatform: "windows/x86_64",
		},
	} {
		t.Run(name, func(t *testing.T) {
			platform, err := RedirectPlatform(tc.inOS, tc.inArch, tc.inWorkloadType)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
				return
			}
			require.NoError(t, err)
			require.Equal(t, tc.wantedPlatform, platform)
		})
	}
}
func TestBuildConfig(t *testing.T) {
mockWsRoot := "/root/dir"
testCases := map[string]struct {
inBuild BuildArgsOrString
wantedBuild DockerBuildArgs
}{
"simple case: BuildString path to dockerfile": {
inBuild: BuildArgsOrString{
BuildString: aws.String("my/Dockerfile"),
},
wantedBuild: DockerBuildArgs{
Dockerfile: aws.String(filepath.Join(mockWsRoot, "my/Dockerfile")),
Context: aws.String(filepath.Join(mockWsRoot, "my")),
},
},
"Different context than dockerfile": {
inBuild: BuildArgsOrString{
BuildArgs: DockerBuildArgs{
Dockerfile: aws.String("build/dockerfile"),
Context: aws.String("cmd/main"),
},
},
wantedBuild: DockerBuildArgs{
Dockerfile: aws.String(filepath.Join(mockWsRoot, "build/dockerfile")),
Context: aws.String(filepath.Join(mockWsRoot, "cmd/main")),
},
},
"no dockerfile specified": {
inBuild: BuildArgsOrString{
BuildArgs: DockerBuildArgs{
Context: aws.String("cmd/main"),
},
},
wantedBuild: DockerBuildArgs{
Dockerfile: aws.String(filepath.Join(mockWsRoot, "cmd", "main", "Dockerfile")),
Context: aws.String(filepath.Join(mockWsRoot, "cmd", "main")),
},
},
"no dockerfile or context specified": {
inBuild: BuildArgsOrString{
BuildArgs: DockerBuildArgs{
Args: map[string]string{
"goodDog": "bowie",
},
},
},
wantedBuild: DockerBuildArgs{
Dockerfile: aws.String(filepath.Join(mockWsRoot, "Dockerfile")),
Context: aws.String(mockWsRoot),
Args: map[string]string{
"goodDog": "bowie",
},
},
},
"including args": {
inBuild: BuildArgsOrString{
BuildArgs: DockerBuildArgs{
Dockerfile: aws.String("my/Dockerfile"),
Args: map[string]string{
"goodDog": "bowie",
"badGoose": "HONK",
},
},
},
wantedBuild: DockerBuildArgs{
Dockerfile: aws.String(filepath.Join(mockWsRoot, "my/Dockerfile")),
Context: aws.String(filepath.Join(mockWsRoot, "my")),
Args: map[string]string{
"goodDog": "bowie",
"badGoose": "HONK",
},
},
},
"including build options": {
inBuild: BuildArgsOrString{
BuildArgs: DockerBuildArgs{
Target: aws.String("foobar"),
CacheFrom: []string{
"foo/bar:latest",
"foo/bar/baz:1.2.3",
},
},
},
wantedBuild: DockerBuildArgs{
Dockerfile: aws.String(filepath.Join(mockWsRoot, "Dockerfile")),
Context: aws.String(mockWsRoot),
Target: aws.String("foobar"),
CacheFrom: []string{
"foo/bar:latest",
"foo/bar/baz:1.2.3",
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
s := Image{
ImageLocationOrBuild: ImageLocationOrBuild{
Build: tc.inBuild,
},
}
got := s.BuildConfig(mockWsRoot)
require.Equal(t, tc.wantedBuild, *got)
})
}
}
// TestNetworkConfig_IsEmpty verifies that a network config reports empty only
// when none of its vpc fields are set.
func TestNetworkConfig_IsEmpty(t *testing.T) {
	for name, tc := range map[string]struct {
		in     NetworkConfig
		wanted bool
	}{
		"empty network config": {
			wanted: true,
		},
		"non empty network config": {
			in: NetworkConfig{
				VPC: vpcConfig{
					SecurityGroups: SecurityGroupsIDsOrConfig{
						IDs: []stringOrFromCFN{{Plain: aws.String("group")}},
					},
				},
			},
			wanted: false,
		},
	} {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.IsEmpty())
		})
	}
}
func TestSecurityGroupsConfig_GetIDs(t *testing.T) {
testCases := map[string]struct {
in SecurityGroupsIDsOrConfig
wanted []stringOrFromCFN
}{
"nil returned when no security groups are specified": {
in: SecurityGroupsIDsOrConfig{},
wanted: nil,
},
"security groups in map are returned": {
in: SecurityGroupsIDsOrConfig{
AdvancedConfig: SecurityGroupsConfig{
SecurityGroups: []stringOrFromCFN{
{
Plain: aws.String("group"),
},
{
Plain: aws.String("group1"),
},
{
FromCFN: fromCFN{
Name: aws.String("sg-001"),
},
},
},
},
},
wanted: []stringOrFromCFN{
{
Plain: aws.String("group"),
},
{
Plain: aws.String("group1"),
},
{
FromCFN: fromCFN{
Name: aws.String("sg-001"),
},
},
},
},
"nil returned when security groups in map are empty": {
in: SecurityGroupsIDsOrConfig{
AdvancedConfig: SecurityGroupsConfig{
SecurityGroups: []stringOrFromCFN{},
},
},
wanted: nil,
},
"security groups in array are returned": {
in: SecurityGroupsIDsOrConfig{
IDs: []stringOrFromCFN{
{
Plain: aws.String("123"),
},
{
Plain: aws.String("45"),
},
{
FromCFN: fromCFN{
Name: aws.String("sg-001"),
},
},
},
},
wanted: []stringOrFromCFN{
{Plain: aws.String("123")},
{Plain: aws.String("45")},
{FromCFN: fromCFN{
Name: aws.String("sg-001"),
}},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// WHEN
sgs := tc.in.GetIDs()
// THEN
require.Equal(t, tc.wanted, sgs)
})
}
}
// TestSecurityGroupsConfig_IsDefaultSecurityGroupDenied verifies that the
// environment's default security group is only denied when the advanced
// config explicitly sets deny_default to true.
func TestSecurityGroupsConfig_IsDefaultSecurityGroupDenied(t *testing.T) {
	for name, tc := range map[string]struct {
		in     SecurityGroupsIDsOrConfig
		wanted bool
	}{
		"default security group is applied when no vpc security config is present": {
			wanted: false,
		},
		"default security group is applied when deny_default is not specified in SG config": {
			in: SecurityGroupsIDsOrConfig{
				AdvancedConfig: SecurityGroupsConfig{
					SecurityGroups: []stringOrFromCFN{{Plain: aws.String("1")}},
				},
			},
			wanted: false,
		},
		"default security group is applied when deny_default is false in SG config": {
			in: SecurityGroupsIDsOrConfig{
				AdvancedConfig: SecurityGroupsConfig{
					SecurityGroups: []stringOrFromCFN{{Plain: aws.String("1")}},
					DenyDefault:    aws.Bool(false),
				},
			},
			wanted: false,
		},
		"default security group is applied when security group array is specified": {
			in: SecurityGroupsIDsOrConfig{
				IDs: []stringOrFromCFN{{Plain: aws.String("1")}},
			},
			wanted: false,
		},
		"default security group is not applied when default_deny is true": {
			in: SecurityGroupsIDsOrConfig{
				AdvancedConfig: SecurityGroupsConfig{DenyDefault: aws.Bool(true)},
			},
			wanted: true,
		},
	} {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.IsDefaultSecurityGroupDenied())
		})
	}
}
// TestNetworkConfig_UnmarshalYAML verifies that the `network.vpc` section
// decodes security groups from both the plain list form and the advanced
// `groups`/`deny_default` form, and that an empty vpc yields the zero config.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant; it was reconstructed to be valid YAML — confirm against the
// original file.
func TestNetworkConfig_UnmarshalYAML(t *testing.T) {
	var (
		trueValue = true
	)
	testCases := map[string]struct {
		data         string
		wantedConfig *NetworkConfig
		wantedErr    error
	}{
		"defaults to public placement if vpc is empty": {
			data: `
network:
  vpc:
`,
			wantedConfig: &NetworkConfig{
				VPC: vpcConfig{},
			},
		},
		"unmarshals successfully for public placement with security groups": {
			data: `
network:
  vpc:
    placement: 'public'
    security_groups:
      - 'sg-1234'
      - 'sg-4567'
      - from_cfn: 'dbsg-001'
`,
			wantedConfig: &NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{
						PlacementString: placementStringP(PublicSubnetPlacement),
					},
					SecurityGroups: SecurityGroupsIDsOrConfig{
						IDs: []stringOrFromCFN{
							{
								Plain: aws.String("sg-1234"),
							},
							{
								Plain: aws.String("sg-4567"),
							},
							{
								FromCFN: fromCFN{
									Name: aws.String("dbsg-001"),
								},
							},
						},
						AdvancedConfig: SecurityGroupsConfig{},
					},
				},
			},
		},
		"unmarshal is successful for security groups specified in config": {
			data: `
network:
  vpc:
    security_groups:
      groups:
        - 'sg-1234'
        - 'sg-4567'
        - from_cfn: 'dbsg-001'
      deny_default: true
`,
			wantedConfig: &NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{},
					SecurityGroups: SecurityGroupsIDsOrConfig{
						IDs: nil,
						AdvancedConfig: SecurityGroupsConfig{
							SecurityGroups: []stringOrFromCFN{
								{
									Plain: aws.String("sg-1234"),
								},
								{
									Plain: aws.String("sg-4567"),
								},
								{
									FromCFN: fromCFN{
										Name: aws.String("dbsg-001"),
									},
								},
							},
							DenyDefault: &trueValue,
						},
					},
				},
			},
		},
		"unmarshal is successful for security groups specified in config without default deny": {
			data: `
network:
  vpc:
    security_groups:
      groups: ['sg-1234', 'sg-4567']
`,
			wantedConfig: &NetworkConfig{
				VPC: vpcConfig{
					Placement: PlacementArgOrString{},
					SecurityGroups: SecurityGroupsIDsOrConfig{
						IDs: nil,
						AdvancedConfig: SecurityGroupsConfig{
							SecurityGroups: []stringOrFromCFN{
								{
									Plain: aws.String("sg-1234"),
								},
								{
									Plain: aws.String("sg-4567"),
								},
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			// A throwaway wrapper struct gives the `network:` key a home.
			type manifest struct {
				Network *NetworkConfig `yaml:"network"`
			}
			var m manifest
			// WHEN
			err := yaml.Unmarshal([]byte(tc.data), &m)
			// THEN
			if tc.wantedErr != nil {
				require.EqualError(t, err, tc.wantedErr.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedConfig, m.Network)
			}
		})
	}
}
// TestDependency_UnmarshalYAML verifies that `depends_on` decodes into the
// image's DependsOn map, that absent or empty sections leave it nil, and that
// inconsistently indented YAML produces a parse error.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant — the error case relies on inconsistent indentation to fail
// parsing; it was reconstructed and should be confirmed against the original
// file.
func TestDependency_UnmarshalYAML(t *testing.T) {
	testCases := map[string]struct {
		inContent    []byte
		wantedStruct Image
		wantedError  error
	}{
		"Unspecified optional dependencies don't appear in image": {
			inContent:    []byte(``),
			wantedStruct: Image{},
		},
		"Empty dependencies don't appear in image": {
			inContent:    []byte(`depends_on:`),
			wantedStruct: Image{},
		},
		"Error when unmarshallable": {
			inContent: []byte(`depends_on:
    frontend: coolwebsite
  sidecar2: wheels`),
			// wantedStruct is ignored on the error path; only the error is checked.
			wantedStruct: Image{
				DependsOn: map[string]string{
					"frontend": "coolwebsite",
					"sidecar2": "wheels",
				},
			},
			wantedError: errors.New("yaml: line 2: did not find expected key"),
		},
		"Valid yaml specified": {
			inContent: []byte(`depends_on:
  frontend: coolwebsite
  sidecar2: wheels`),
			wantedStruct: Image{
				DependsOn: map[string]string{
					"frontend": "coolwebsite",
					"sidecar2": "wheels",
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			i := Image{}
			err := yaml.Unmarshal(tc.inContent, &i)
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedStruct.DependsOn, i.DependsOn)
			}
		})
	}
}
// TestUnmarshalPublish verifies that the `publish` section decodes SNS topics,
// including the FIFO setting in both its boolean and advanced-map forms, and
// that malformed values produce descriptive errors.
//
// NOTE(review): the indentation inside the multi-line YAML literals below is
// significant; it was reconstructed to be valid YAML — confirm against the
// original file.
func TestUnmarshalPublish(t *testing.T) {
	testCases := map[string]struct {
		inContent     string
		wantedPublish PublishConfig
		wantedErr     error
	}{
		"Valid publish yaml": {
			inContent: `
topics:
  - name: tests
`,
			wantedPublish: PublishConfig{
				Topics: []Topic{
					{
						Name: aws.String("tests"),
					},
				},
			},
		},
		"Valid publish yaml with fifo topic enabled": {
			inContent: `
topics:
  - name: tests
    fifo: true
`,
			wantedPublish: PublishConfig{
				Topics: []Topic{
					{
						Name: aws.String("tests"),
						FIFO: FIFOTopicAdvanceConfigOrBool{
							Enable: aws.Bool(true),
						},
					},
				},
			},
		},
		"Valid publish yaml with advanced fifo topic": {
			inContent: `
topics:
  - name: tests
    fifo:
      content_based_deduplication: true
`,
			wantedPublish: PublishConfig{
				Topics: []Topic{
					{
						Name: aws.String("tests"),
						FIFO: FIFOTopicAdvanceConfigOrBool{
							Advanced: FIFOTopicAdvanceConfig{
								ContentBasedDeduplication: aws.Bool(true),
							},
						},
					},
				},
			},
		},
		"Invalid publish yaml with advanced fifo topic": {
			// "apple" is neither a boolean nor a map, so the custom
			// unmarshaler rejects it.
			inContent: `
topics:
  - name: tests
    fifo: apple
`,
			wantedErr: errors.New(`unable to unmarshal "fifo" field into boolean or compose-style map`),
		},
		"Error when unmarshalable": {
			inContent: `
topics: abc
`,
			wantedErr: errors.New("yaml: unmarshal errors:\n line 2: cannot unmarshal !!str `abc` into []manifest.Topic"),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			p := PublishConfig{}
			err := yaml.Unmarshal([]byte(tc.inContent), &p)
			if tc.wantedErr != nil {
				require.EqualError(t, err, tc.wantedErr.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedPublish, p)
			}
		})
	}
}
| 1,322 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package manifestinfo provides access to information embedded in a manifest.
package manifestinfo
// Workload manifest type identifiers.
//
// NOTE(review): these strings appear in user manifests (the `type:` field), so
// changing an existing value would presumably break published manifests —
// confirm before editing.
const (
	// LoadBalancedWebServiceType is a web service with a load balancer and Fargate as compute.
	LoadBalancedWebServiceType = "Load Balanced Web Service"
	// RequestDrivenWebServiceType is a Request-Driven Web Service managed by AppRunner.
	RequestDrivenWebServiceType = "Request-Driven Web Service"
	// BackendServiceType is a service that cannot be accessed from the internet but can be reached from other services.
	BackendServiceType = "Backend Service"
	// WorkerServiceType is a worker service that manages the consumption of messages.
	WorkerServiceType = "Worker Service"
	// StaticSiteType is a static site service that manages static assets.
	StaticSiteType = "Static Site"
	// ScheduledJobType is a recurring ECS Fargate task which runs on a schedule.
	ScheduledJobType = "Scheduled Job"
)
// ServiceTypes returns the list of supported service manifest types.
func ServiceTypes() []string {
	var types []string
	types = append(types,
		RequestDrivenWebServiceType,
		LoadBalancedWebServiceType,
		BackendServiceType,
		WorkerServiceType,
		StaticSiteType,
	)
	return types
}
// JobTypes returns the list of supported job manifest types.
func JobTypes() []string {
	var types []string
	types = append(types, ScheduledJobType)
	return types
}
// WorkloadTypes returns the list of all manifest types: services first, then jobs.
func WorkloadTypes() []string {
	types := ServiceTypes()
	types = append(types, JobTypes()...)
	return types
}
// IsTypeAService reports whether the given manifest type is a service type.
func IsTypeAService(t string) bool {
	for _, s := range ServiceTypes() {
		if s == t {
			return true
		}
	}
	return false
}
// IsTypeAJob reports whether the given manifest type is a job type.
func IsTypeAJob(t string) bool {
	for _, j := range JobTypes() {
		if j == t {
			return true
		}
	}
	return false
}
| 64 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifestinfo
import (
"testing"
"github.com/stretchr/testify/require"
)
// Test_IsTypeAService checks that IsTypeAService recognizes service manifest
// types and rejects unknown type strings.
func Test_IsTypeAService(t *testing.T) {
	testCases := map[string]struct {
		inType string
		wanted bool
	}{
		"return false if not a service": {
			inType: "foobar",
			wanted: false,
		},
		"return true if it is a service": {
			inType: LoadBalancedWebServiceType,
			wanted: true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			actual := IsTypeAService(tc.inType)
			// require.Equal's signature is (t, expected, actual); the previous
			// call had the arguments swapped, producing misleading failure output.
			require.Equal(t, tc.wanted, actual)
		})
	}
}
// Test_IsTypeAJob checks that IsTypeAJob recognizes job manifest types and
// rejects unknown type strings.
func Test_IsTypeAJob(t *testing.T) {
	testCases := map[string]struct {
		inType string
		wanted bool
	}{
		"return false if not a job": {
			inType: "foobar",
			wanted: false,
		},
		"return true if it is a job": {
			inType: ScheduledJobType,
			wanted: true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			actual := IsTypeAJob(tc.inType)
			// require.Equal's signature is (t, expected, actual); the previous
			// call had the arguments swapped, producing misleading failure output.
			require.Equal(t, tc.wanted, actual)
		})
	}
}
| 55 |
copilot-cli | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: ./internal/pkg/manifest/loader.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
ec2 "github.com/aws/copilot-cli/internal/pkg/aws/ec2"
gomock "github.com/golang/mock/gomock"
)
// NOTE(review): this type and its methods are generated by MockGen (see the
// file header); regenerate with `mockgen` rather than editing by hand.

// MocksubnetIDsGetter is a mock of subnetIDsGetter interface.
type MocksubnetIDsGetter struct {
	ctrl     *gomock.Controller
	recorder *MocksubnetIDsGetterMockRecorder
}

// MocksubnetIDsGetterMockRecorder is the mock recorder for MocksubnetIDsGetter.
type MocksubnetIDsGetterMockRecorder struct {
	mock *MocksubnetIDsGetter
}

// NewMocksubnetIDsGetter creates a new mock instance.
func NewMocksubnetIDsGetter(ctrl *gomock.Controller) *MocksubnetIDsGetter {
	mock := &MocksubnetIDsGetter{ctrl: ctrl}
	mock.recorder = &MocksubnetIDsGetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MocksubnetIDsGetter) EXPECT() *MocksubnetIDsGetterMockRecorder {
	return m.recorder
}

// SubnetIDs mocks base method.
func (m *MocksubnetIDsGetter) SubnetIDs(filters ...ec2.Filter) ([]string, error) {
	m.ctrl.T.Helper()
	// Re-box the typed variadic filters as []interface{} for gomock's generic Call.
	varargs := []interface{}{}
	for _, a := range filters {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "SubnetIDs", varargs...)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SubnetIDs indicates an expected call of SubnetIDs.
func (mr *MocksubnetIDsGetterMockRecorder) SubnetIDs(filters ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetIDs", reflect.TypeOf((*MocksubnetIDsGetter)(nil).SubnetIDs), filters...)
}
// Mockloader is a mock of loader interface.
// NOTE: generated by MockGen (see the file header); regenerate rather than editing by hand.
type Mockloader struct {
	ctrl *gomock.Controller
	recorder *MockloaderMockRecorder
}
// MockloaderMockRecorder is the mock recorder for Mockloader.
type MockloaderMockRecorder struct {
	mock *Mockloader
}
// NewMockloader creates a new mock instance.
func NewMockloader(ctrl *gomock.Controller) *Mockloader {
	mock := &Mockloader{ctrl: ctrl}
	mock.recorder = &MockloaderMockRecorder{mock}
	return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *Mockloader) EXPECT() *MockloaderMockRecorder {
	return m.recorder
}
// load mocks base method.
func (m *Mockloader) load() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "load")
	ret0, _ := ret[0].(error)
	return ret0
}
// load indicates an expected call of load.
func (mr *MockloaderMockRecorder) load() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "load", reflect.TypeOf((*Mockloader)(nil).load))
}
| 92 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/spf13/afero"
)
// CDK is an Overrider that can transform a CloudFormation template with the Cloud Development Kit.
type CDK struct {
	rootAbsPath string // Absolute path to the overrides/ directory.
	execWriter io.Writer // Writer to pipe stdout and stderr content from os/exec calls.
	fs afero.Fs // OS file system.
	// Hooks around the os/exec package so install/transform can be faked in tests.
	exec struct {
		LookPath func(file string) (string, error)
		Command func(name string, args ...string) *exec.Cmd
	} // For testing os/exec calls.
}
// CDKOpts is optional configuration for initializing a CDK Overrider.
// Every field has a sensible default applied by WithCDK when left nil.
type CDKOpts struct {
	ExecWriter io.Writer // Writer to forward stdout and stderr writes from os/exec calls. If nil default to io.Discard.
	FS afero.Fs // File system interface. If nil, defaults to the OS file system.
	EnvVars map[string]string // Environment variables key value pairs to pass to the "cdk synth" command.
	LookPathFn func(executable string) (string, error) // Search for the executable under $PATH. Defaults to exec.LookPath.
	CommandFn func(name string, args ...string) *exec.Cmd // Create a new executable command. Defaults to exec.Command rooted at the overrides/ dir.
}
// WithCDK instantiates a new CDK Overrider with root being the path to the overrides/ directory.
// Any option left unset in opts falls back to its documented default.
func WithCDK(root string, opts CDKOpts) *CDK {
	writer := io.Discard
	if opts.ExecWriter != nil {
		writer = opts.ExecWriter
	}
	fs := afero.NewOsFs()
	if opts.FS != nil {
		fs = opts.FS
	}
	lookPathFn := exec.LookPath
	if opts.LookPathFn != nil {
		lookPathFn = opts.LookPathFn
	}
	cmdFn := opts.CommandFn
	if cmdFn == nil {
		// Default command factory: run under the overrides/ dir with the
		// configured environment variables appended to the OS environment.
		cmdFn = func(name string, args ...string) *exec.Cmd {
			cmd := exec.Command(name, args...)
			cmd.Dir = root
			envs := make([]string, 0, len(opts.EnvVars))
			for k, v := range opts.EnvVars {
				envs = append(envs, k+"="+v)
			}
			cmd.Env = append(os.Environ(), envs...)
			return cmd
		}
	}
	return &CDK{
		rootAbsPath: root,
		execWriter:  writer,
		fs:          fs,
		exec: struct {
			LookPath func(file string) (string, error)
			Command  func(name string, args ...string) *exec.Cmd
		}{
			LookPath: lookPathFn,
			Command:  cmdFn,
		},
	}
}
// Override returns the extended CloudFormation template body using the CDK.
// In order to ensure the CDK transformations can be applied, Copilot first installs any CDK dependencies
// as well as the toolkit itself.
func (cdk *CDK) Override(body []byte) ([]byte, error) {
	// Step 1: make the CDK toolkit and dependencies available locally.
	if err := cdk.install(); err != nil {
		return nil, err
	}
	// Step 2: run "cdk synth" against the template body.
	transformed, err := cdk.transform(body)
	if err != nil {
		return nil, err
	}
	// Step 3: strip CDK-injected sections from the synthesized template.
	return cdk.cleanUp(transformed)
}
// install runs "npm install" under the overrides/ directory so both the CDK
// toolkit and its dependencies are available locally.
// It returns an errNPMUnavailable if "npm" cannot be found in $PATH.
func (cdk *CDK) install() error {
	_, err := cdk.exec.LookPath("npm")
	if err != nil {
		return &errNPMUnavailable{parent: err}
	}
	cmd := cdk.exec.Command("npm", "install")
	cmd.Stdout = cdk.execWriter
	cmd.Stderr = cdk.execWriter
	if err := cmd.Run(); err != nil {
		return fmt.Errorf(`run %q: %w`, cmd.String(), err)
	}
	return nil
}
// transform writes body to the hidden .build/in.yml file and runs "cdk synth",
// returning whatever the CDK printed on stdout.
func (cdk *CDK) transform(body []byte) ([]byte, error) {
	buildPath := filepath.Join(cdk.rootAbsPath, ".build")
	if err := cdk.fs.MkdirAll(buildPath, 0755); err != nil {
		return nil, fmt.Errorf("create %s directory to store the CloudFormation template body: %w", buildPath, err)
	}
	inputPath := filepath.Join(buildPath, "in.yml")
	if err := afero.WriteFile(cdk.fs, inputPath, body, 0644); err != nil {
		return nil, fmt.Errorf("write CloudFormation template body content at %s: %w", inputPath, err)
	}
	// We assume that a node_modules/ dir is present with the CDK downloaded after running "npm install".
	// This way clients don't need to install the CDK toolkit separately.
	var stdout bytes.Buffer
	cmd := cdk.exec.Command(filepath.Join("node_modules", ".bin", "cdk"), "synth", "--no-version-reporting")
	cmd.Stdout = &stdout
	cmd.Stderr = cdk.execWriter
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf(`run %q: %w`, cmd.String(), err)
	}
	return stdout.Bytes(), nil
}
// cleanUp removes YAML additions that get injected by the CDK that are unnecessary,
// and transforms the Description string of the CloudFormation template to highlight the template is now overridden with the CDK.
func (cdk *CDK) cleanUp(in []byte) ([]byte, error) {
	// See [template anatomy]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html
	// We ignore Rules on purpose as it's only used by the CDK.
	type template struct {
		AWSTemplateFormatVersion string               `yaml:"AWSTemplateFormatVersion,omitempty"`
		Description              string               `yaml:"Description,omitempty"`
		Metadata                 yaml.Node            `yaml:"Metadata,omitempty"`
		Parameters               map[string]yaml.Node `yaml:"Parameters,omitempty"`
		Mappings                 yaml.Node            `yaml:"Mappings,omitempty"`
		Conditions               yaml.Node            `yaml:"Conditions,omitempty"`
		Transform                yaml.Node            `yaml:"Transform,omitempty"`
		Resources                yaml.Node            `yaml:"Resources,omitempty"`
		Outputs                  yaml.Node            `yaml:"Outputs,omitempty"`
	}
	var body template
	if err := yaml.Unmarshal(in, &body); err != nil {
		// Fixed typo in the error message ("unmarsal" -> "unmarshal").
		return nil, fmt.Errorf("unmarshal CDK transformed YAML template: %w", err)
	}
	// Augment the description with Copilot and the CDK metrics.
	body.Description = fmt.Sprintf("%s using AWS Copilot and CDK.", strings.TrimSuffix(body.Description, "."))
	// Get rid of CDK parameters.
	delete(body.Parameters, "BootstrapVersion")
	out := new(bytes.Buffer)
	encoder := yaml.NewEncoder(out)
	encoder.SetIndent(2)
	if err := encoder.Encode(body); err != nil {
		return nil, fmt.Errorf("marshal cleaned up CDK transformed template: %w", err)
	}
	return out.Bytes(), nil
}
// ScaffoldWithCDK bootstraps a CDK application under dir/ to override the seed CloudFormation resources.
// If the directory is not empty, then returns an error.
func ScaffoldWithCDK(fs afero.Fs, dir string, seeds []template.CFNResource) error {
	// If the directory does not exist, [afero.IsEmpty] returns false and an error.
	// Therefore, we only want to check if a directory is empty only if it also exists.
	if exists, _ := afero.Exists(fs, dir); exists {
		if isEmpty, _ := afero.IsEmpty(fs, dir); !isEmpty {
			return fmt.Errorf("directory %q is not empty", dir)
		}
	}
	return templates.WalkOverridesCDKDir(seeds, writeFilesToDir(dir, fs))
}
// writeFilesToDir returns a walk function that mirrors each template file
// under dir, creating any intermediate directories along the way.
func writeFilesToDir(dir string, fs afero.Fs) template.WalkDirFunc {
	return func(name string, content *template.Content) error {
		target := filepath.Join(dir, name)
		parent := filepath.Dir(target)
		if err := fs.MkdirAll(parent, 0755); err != nil {
			return fmt.Errorf("make directories along %q: %w", parent, err)
		}
		if err := afero.WriteFile(fs, target, content.Bytes(), 0644); err != nil {
			return fmt.Errorf("write file at %q: %w", target, err)
		}
		return nil
	}
}
| 201 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"bytes"
"fmt"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
)
// TestCDK_Override exercises CDK.Override with stubbed LookPath and Command hooks:
// npm discovery failures, "npm install" failures, reuse of the hidden .build/in.yml
// input file, "cdk synth" failures, the order of invoked commands, and the final
// clean up of CDK-injected template sections.
func TestCDK_Override(t *testing.T) {
	t.Parallel()
	t.Run("on install: should return a wrapped error if npm is not available for the users", func(t *testing.T) {
		// GIVEN
		cdk := WithCDK("", CDKOpts{
			FS: afero.NewMemMapFs(),
			LookPathFn: func(file string) (string, error) {
				return "", fmt.Errorf(`exec: "%s": executable file not found in $PATH`, file)
			},
		})
		// WHEN
		_, err := cdk.Override(nil)
		// THEN
		require.EqualError(t, err, `"npm" cannot be found: "npm" is required to override with the Cloud Development Kit: exec: "npm": executable file not found in $PATH`)
	})
	t.Run("on install: should return a wrapped error if npm install fails", func(t *testing.T) {
		// GIVEN
		cdk := WithCDK("", CDKOpts{
			FS: afero.NewMemMapFs(),
			LookPathFn: func(file string) (string, error) {
				return "/bin/npm", nil
			},
			CommandFn: func(name string, args ...string) *exec.Cmd {
				// "exit" is not a real executable, so Run fails for every command.
				return exec.Command("exit", "42")
			},
		})
		// WHEN
		_, err := cdk.Override(nil)
		// THEN
		require.ErrorContains(t, err, `run "exit 42"`)
	})
	t.Run("should override the same hidden file on multiple Override calls", func(t *testing.T) {
		// GIVEN
		mockFS := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = mockFS.MkdirAll(root, 0755)
		mockFS = afero.NewBasePathFs(mockFS, root)
		cdk := WithCDK(root, CDKOpts{
			ExecWriter: new(bytes.Buffer),
			FS:         mockFS,
			LookPathFn: func(file string) (string, error) {
				return "/bin/npm", nil
			},
			CommandFn: func(name string, args ...string) *exec.Cmd {
				return exec.Command("echo")
			},
		})
		// WHEN
		_, err := cdk.Override([]byte("first"))
		// THEN
		require.NoError(t, err)
		actual, _ := afero.ReadFile(mockFS, filepath.Join(root, ".build", "in.yml"))
		require.Equal(t, []byte("first"), actual, `expected to write "first" to the hidden file`)
		// WHEN
		_, err = cdk.Override([]byte("second"))
		// THEN
		require.NoError(t, err)
		actual, _ = afero.ReadFile(mockFS, filepath.Join(root, ".build", "in.yml"))
		require.Equal(t, []byte("second"), actual, `expected to write "second" to the hidden file`)
	})
	t.Run("should return a wrapped error if cdk synth fails", func(t *testing.T) {
		// GIVEN
		cdk := WithCDK("", CDKOpts{
			ExecWriter: new(bytes.Buffer),
			FS:         afero.NewMemMapFs(),
			LookPathFn: func(file string) (string, error) {
				return "/bin/npm", nil
			},
			CommandFn: func(name string, args ...string) *exec.Cmd {
				// Only the cdk synth invocation fails; "npm install" succeeds.
				if name == filepath.Join("node_modules", ".bin", "cdk") {
					return exec.Command("exit", "42")
				}
				return exec.Command("echo", "success")
			},
		})
		// WHEN
		_, err := cdk.Override(nil)
		// THEN
		require.ErrorContains(t, err, `run "exit 42"`)
	})
	t.Run("should invoke npm install and cdk synth", func(t *testing.T) {
		binPath := filepath.Join("node_modules", ".bin", "cdk")
		buf := new(strings.Builder)
		cdk := WithCDK("", CDKOpts{
			ExecWriter: buf,
			FS:         afero.NewMemMapFs(),
			LookPathFn: func(file string) (string, error) {
				return "/bin/npm", nil
			},
			CommandFn: func(name string, args ...string) *exec.Cmd {
				// Echo the invoked command line so the assertions below can see it.
				return exec.Command("echo", fmt.Sprintf("Description: %s", strings.Join(append([]string{name}, args...), " ")))
			},
		})
		// WHEN
		out, err := cdk.Override(nil)
		// THEN
		require.NoError(t, err)
		require.Contains(t, buf.String(), "npm install")
		require.Contains(t, string(out), fmt.Sprintf("%s synth --no-version-reporting", binPath))
	})
	t.Run("should return the transformed document with CDK metadata stripped and description updated", func(t *testing.T) {
		buf := new(strings.Builder)
		cdk := WithCDK("", CDKOpts{
			ExecWriter: buf,
			FS:         afero.NewMemMapFs(),
			LookPathFn: func(file string) (string, error) {
				return "/bin/npm", nil
			},
			CommandFn: func(name string, args ...string) *exec.Cmd {
				// Fake cdk synth output containing the CDK-injected BootstrapVersion
				// parameter and Rules section that cleanUp should strip.
				return exec.Command("echo", `
Description: CloudFormation template that represents a load balanced web service on Amazon ECS.
AWSTemplateFormatVersion: "2010-09-09"
Metadata:
  Manifest: |
    name: admin
    type: Load Balanced Web Service
Parameters:
  AppName:
    Type: String
  BootstrapVersion:
    Type: AWS::SSM::Parameter::Value<String>
    Default: /cdk-bootstrap/hnb659fds/version
    Description: Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store. [cdk:skip]
Conditions:
  HasAddons:
    Fn::Not:
      - Fn::Equals:
          - Ref: AddonsTemplateURL
          - ""
Resources:
  LogGroup:
    Type: AWS::Logs::LogGroup
Outputs:
  DiscoveryServiceARN:
    Description: ARN of the Discovery Service.
Rules:
  CheckBootstrapVersion:
    Assertions:
      - Assert:
          Fn::Not:
            - Fn::Contains:
                - - "1"
                  - "2"
                  - "3"
                  - "4"
                  - "5"
                - Ref: BootstrapVersion
        AssertDescription: CDK bootstrap stack version 6 required. Please run 'cdk bootstrap' with a recent version of the CDK CLI.
`)
			},
		})
		// WHEN
		out, err := cdk.Override(nil)
		// THEN
		require.NoError(t, err)
		require.Equal(t, `AWSTemplateFormatVersion: "2010-09-09"
Description: CloudFormation template that represents a load balanced web service on Amazon ECS using AWS Copilot and CDK.
Metadata:
  Manifest: |
    name: admin
    type: Load Balanced Web Service
Parameters:
  AppName:
    Type: String
Conditions:
  HasAddons:
    Fn::Not:
      - Fn::Equals:
          - Ref: AddonsTemplateURL
          - ""
Resources:
  LogGroup:
    Type: AWS::Logs::LogGroup
Outputs:
  DiscoveryServiceARN:
    Description: ARN of the Discovery Service.
`, string(out))
	})
}
// TestScaffoldWithCDK covers bootstrapping the CDK skeleton files.
func TestScaffoldWithCDK(t *testing.T) {
	t.Run("scaffolds files in an empty directory", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		dir := filepath.Join("copilot", "frontend", "overrides")
		// WHEN
		err := ScaffoldWithCDK(fs, dir, []template.CFNResource{
			{
				Type:      "AWS::ECS::Service",
				LogicalID: "Service",
			},
		})
		// THEN
		require.NoError(t, err)
		wanted := []struct {
			path string
			msg  string
		}{
			{filepath.Join(dir, "package.json"), "package.json should exist"},
			{filepath.Join(dir, "cdk.json"), "cdk.json should exist"},
			{filepath.Join(dir, "stack.ts"), "stack.ts should exist"},
			{filepath.Join(dir, "bin", "override.ts"), "bin/override.ts should exist"},
		}
		for _, f := range wanted {
			ok, _ := afero.Exists(fs, f.path)
			require.True(t, ok, f.msg)
		}
	})
	t.Run("should return an error if the directory is not empty", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		dir := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(dir, 0755)
		_ = afero.WriteFile(fs, filepath.Join(dir, "cdk.json"), []byte("content"), 0644)
		// WHEN
		err := ScaffoldWithCDK(fs, dir, nil)
		// THEN
		require.EqualError(t, err, fmt.Sprintf("directory %q is not empty", dir))
	})
}
| 258 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"fmt"
)
// errNPMUnavailable is returned when the "npm" binary cannot be located in $PATH.
type errNPMUnavailable struct {
	parent error
}

// Error implements the error interface.
func (err *errNPMUnavailable) Error() string {
	const reason = `"npm" cannot be found: "npm" is required to override with the Cloud Development Kit`
	return fmt.Sprintf("%s: %v", reason, err.parent)
}

// RecommendActions implements the cli.actionRecommender interface.
func (err *errNPMUnavailable) RecommendActions() string {
	const docsURL = "https://docs.npmjs.com/downloading-and-installing-node-js-and-npm"
	return fmt.Sprintf(`Please follow instructions at: %q to install "npm"`, docsURL)
}
// ErrNotExist occurs when the path of the file associated with an Overrider does not exist.
type ErrNotExist struct {
	parent error // Underlying stat error, surfaced via %v in Error.
}

// Error implements the error interface.
func (err *ErrNotExist) Error() string {
	return fmt.Sprintf("overrider does not exist: %v", err.parent)
}
| 31 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestErrNPMUnavailable_RecommendActions pins the user-facing guidance for installing npm.
func TestErrNPMUnavailable_RecommendActions(t *testing.T) {
	wanted := `Please follow instructions at: "https://docs.npmjs.com/downloading-and-installing-node-js-and-npm" to install "npm"`
	require.Equal(t, wanted, new(errNPMUnavailable).RecommendActions())
}
| 15 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
// Noop represents an Overrider that does not do any transformations.
type Noop struct{}

// Override does nothing: it returns the template body unchanged and never errors.
func (no *Noop) Override(body []byte) ([]byte, error) {
	return body, nil
}
| 13 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestNoop_Override asserts that the no-op overrider echoes its input back untouched.
func TestNoop_Override(t *testing.T) {
	// GIVEN
	overrider := &Noop{}
	input := []byte("hello, world!")
	// WHEN
	out, err := overrider.Override(input)
	// THEN
	require.NoError(t, err)
	require.Equal(t, "hello, world!", string(out))
}
| 23 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package override defines functionality to interact with the "overrides/" directory
// for accessing and mutating the Copilot generated AWS CloudFormation templates.
package override
import (
"fmt"
"path/filepath"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/spf13/afero"
)
// Info holds metadata about an overrider.
type Info struct {
	path string // Path to the overrider: the CDK root dir, or the dir holding the YAML patch file.
	mode overriderMode // Flavor of the overrider: CDK application or YAML patch document.
}
// cdkInfo builds the metadata for a CDK application overrider rooted at path.
func cdkInfo(path string) Info {
	return Info{
		mode: cdkOverrider,
		path: path,
	}
}

// yamlPatchInfo builds the metadata for a YAML patch overrider located at path.
func yamlPatchInfo(path string) Info {
	return Info{
		mode: yamlPatchOverrider,
		path: path,
	}
}
// overriderMode distinguishes the supported overrider flavors.
type overriderMode int
const (
	cdkOverrider overriderMode = iota + 1 // Overrides via a CDK application.
	yamlPatchOverrider // Overrides via a YAML patch document.
)
// templates provides access to the embedded scaffolding files.
var templates = template.New()
// Path returns the path to the overrider.
// For CDK applications, returns the root of the CDK directory.
// For YAML patch documents, returns the path to the directory holding the file.
func (i Info) Path() string {
	return i.path
}
// IsCDK returns true if the overrider is a CDK application.
func (i Info) IsCDK() bool {
	return i.mode == cdkOverrider
}
// IsYAMLPatch returns true if the overrider is a YAML patch document.
func (i Info) IsYAMLPatch() bool {
	return i.mode == yamlPatchOverrider
}
// Lookup returns information indicating if the overrider is a CDK application or YAML Patches.
// If path does not exist, then return an ErrNotExist.
// If path is a directory that contains cfn.patches.yml, then IsYAMLPatch evaluates to true.
// If path is a directory that contains a cdk.json file, then IsCDK evaluates to true.
func Lookup(path string, fs afero.Fs) (Info, error) {
	if _, err := fs.Stat(path); err != nil {
		return Info{}, &ErrNotExist{parent: err}
	}
	files, err := afero.ReadDir(fs, path)
	if err != nil {
		return Info{}, fmt.Errorf("read directory %q: %w", path, err)
	}
	if len(files) == 0 {
		return Info{}, fmt.Errorf(`directory at %q is empty`, path)
	}
	// YAML patches take precedence; fall back to CDK detection otherwise.
	if info, err := lookupYAMLPatch(path, fs); err == nil {
		return info, nil
	}
	return lookupCDK(path, fs)
}
// lookupYAMLPatch detects a YAML patch overrider by the presence of cfn.patches.yml under path.
func lookupYAMLPatch(path string, fs afero.Fs) (Info, error) {
	if ok, _ := afero.Exists(fs, filepath.Join(path, yamlPatchFile)); ok {
		return yamlPatchInfo(path), nil
	}
	return Info{}, fmt.Errorf(`%s does not exist under %q`, yamlPatchFile, path)
}

// lookupCDK detects a CDK application overrider by the presence of cdk.json under path.
func lookupCDK(path string, fs afero.Fs) (Info, error) {
	if ok, _ := afero.Exists(fs, filepath.Join(path, "cdk.json")); ok {
		return cdkInfo(path), nil
	}
	return Info{}, fmt.Errorf(`"cdk.json" does not exist under %q`, path)
}
| 104 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"fmt"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/spf13/afero"
)
// TestLookup covers detection of the overrider flavor (CDK application vs. YAML patch)
// from a directory path, including the error paths.
func TestLookup(t *testing.T) {
	t.Parallel()
	t.Run("should return ErrNotExist when the file path does not exist", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend")
		_ = fs.MkdirAll(root, 0755)
		// WHEN
		_, err := Lookup(filepath.Join(root, "overrides"), fs)
		// THEN
		var notExistErr *ErrNotExist
		require.ErrorAs(t, err, &notExistErr)
	})
	t.Run("should return an error when the path is an empty directory", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(root, 0755)
		// WHEN
		_, err := Lookup(root, fs)
		// THEN
		require.ErrorContains(t, err, fmt.Sprintf(`directory at %q is empty`, root))
	})
	t.Run("should return an error when the path is a directory with multiple files but no cdk.json", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(root, 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, "README.md"), []byte(""), 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, "patch.yaml"), []byte(""), 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, "script.js"), []byte(""), 0755)
		// WHEN
		_, err := Lookup(root, fs)
		// THEN
		require.ErrorContains(t, err, `"cdk.json" does not exist`)
	})
	t.Run("should detect a CDK application if a cdk.json file exists within a directory with multiple files", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(root, 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, "cdk.json"), []byte("{}"), 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, "app.ts"), []byte("console.log('hi')"), 0755)
		// WHEN
		info, err := Lookup(root, fs)
		// THEN
		require.NoError(t, err)
		require.True(t, info.IsCDK())
		require.False(t, info.IsYAMLPatch())
	})
	t.Run("should return an error when the path is a file", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(root, 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, "abc.js"), nil, 0755)
		// WHEN
		_, err := Lookup(filepath.Join(root, "abc.js"), fs)
		// THEN
		// Lookup fails at the ReadDir step when handed a file instead of a directory.
		require.ErrorContains(t, err, "read directory")
		require.ErrorContains(t, err, "not a dir")
	})
	t.Run("should detect a YAML patch document on well-formed file paths", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(root, 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, yamlPatchFile), []byte("- {op: 5, path: '/Resources'}"), 0755)
		// WHEN
		info, err := Lookup(root, fs)
		// THEN
		require.NoError(t, err)
		require.True(t, info.IsYAMLPatch())
		require.False(t, info.IsCDK())
		require.Equal(t, root, info.Path())
	})
	// NOTE(review): this subtest is currently identical to the previous one;
	// consider giving it a distinct fixture (e.g. extra non-YAML files) or removing it.
	t.Run("should detect a YAML patch document for directories with a single YAML file", func(t *testing.T) {
		// GIVEN
		fs := afero.NewMemMapFs()
		root := filepath.Join("copilot", "frontend", "overrides")
		_ = fs.MkdirAll(root, 0755)
		_ = afero.WriteFile(fs, filepath.Join(root, yamlPatchFile), []byte("- {op: 5, path: '/Resources'}"), 0755)
		// WHEN
		info, err := Lookup(root, fs)
		// THEN
		require.NoError(t, err)
		require.True(t, info.IsYAMLPatch())
		require.False(t, info.IsCDK())
		require.Equal(t, root, info.Path())
	})
}
| 122 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"fmt"
"path/filepath"
"strconv"
"strings"
"github.com/spf13/afero"
"gopkg.in/yaml.v3"
)
const (
	jsonPointerSeparator = "/" // Separator between reference tokens in a JSON Pointer path (RFC 6901).
	yamlPatchFile = "cfn.patches.yml" // File name of the YAML patch document inside the overrides/ dir.
)
// ScaffoldWithPatch sets up YAML patches in dir/ to apply to the
// Copilot generated CloudFormation template.
func ScaffoldWithPatch(fs afero.Fs, dir string) error {
	// If the directory does not exist, [afero.IsEmpty] returns false and an error.
	// Therefore, we only want to check if a directory is empty only if it also exists.
	if exists, _ := afero.Exists(fs, dir); exists {
		if isEmpty, _ := afero.IsEmpty(fs, dir); !isEmpty {
			return fmt.Errorf("directory %q is not empty", dir)
		}
	}
	return templates.WalkOverridesPatchDir(writeFilesToDir(dir, fs))
}
// Patch applies overrides configured as JSON Patches,
// as defined in https://www.rfc-editor.org/rfc/rfc6902.
type Patch struct {
	filePath string // Path to the directory holding cfn.patches.yml (joined with yamlPatchFile on read).
	fs afero.Fs // OS file system.
}
// PatchOpts is optional configuration for initializing a Patch Overrider.
type PatchOpts struct {
	FS afero.Fs // File system interface. If nil, defaults to the OS file system.
}
// WithPatch instantiates a new Patch Overrider with root being the path to the overrides/ directory.
// It supports a single file (cfn.patches.yml) with configured patches.
func WithPatch(filePath string, opts PatchOpts) *Patch {
	p := &Patch{
		filePath: filePath,
		fs:       afero.NewOsFs(),
	}
	if opts.FS != nil {
		p.fs = opts.FS
	}
	return p
}
// Override returns the overridden CloudFormation template body
// after applying YAML patches to it.
// Each patch is applied in document order; the first failing patch aborts the run.
func (p *Patch) Override(body []byte) ([]byte, error) {
	patches, err := unmarshalPatches(p.filePath, p.fs)
	if err != nil {
		return nil, err
	}
	var root yaml.Node
	if err := yaml.Unmarshal(body, &root); err != nil {
		return nil, fmt.Errorf("invalid template: %w", err)
	}
	for i := range patches {
		patch := patches[i] // needed because operations use pointer to patch.Value
		var err error
		switch patch.Operation {
		case "add":
			err = patch.applyAdd(&root)
		case "remove":
			err = patch.applyRemove(&root)
		case "replace":
			err = patch.applyReplace(&root)
		default:
			// Dropped the trailing period: Go error strings should not end with punctuation.
			return nil, fmt.Errorf("unsupported operation %q: supported operations are %q, %q, and %q", patch.Operation, "add", "remove", "replace")
		}
		if err != nil {
			return nil, fmt.Errorf("unable to apply the %q patch at index %d: %w", patch.Operation, i, err)
		}
	}
	// Tag the Description so usage of YAML patches can be tracked.
	addYAMLPatchDescription(&root)
	out, err := yaml.Marshal(&root)
	if err != nil {
		return nil, fmt.Errorf("unable to return modified document to []byte: %w", err)
	}
	return out, nil
}
// unmarshalPatches reads the cfn.patches.yml document under path and decodes
// it into the list of YAML patches to apply.
func unmarshalPatches(path string, fs afero.Fs) ([]yamlPatch, error) {
	file := filepath.Join(path, yamlPatchFile)
	content, err := afero.ReadFile(fs, file)
	if err != nil {
		return nil, fmt.Errorf("read file at %q: %w", file, err)
	}
	var patches []yamlPatch
	if err := yaml.Unmarshal(content, &patches); err != nil {
		return nil, fmt.Errorf("file at %q does not conform to the YAML patch document schema: %w", file, err)
	}
	return patches, nil
}
// addYAMLPatchDescription updates the Description field of a CloudFormation
// template to indicate it has been overridden with YAML patches for us to keep track of usage metrics.
// It is a no-op if the document has no content or no Description key.
func addYAMLPatchDescription(body *yaml.Node) {
	if body.Kind != yaml.DocumentNode || len(body.Content) == 0 {
		return
	}
	body = body.Content[0] // Move inside the document.
	// Mapping node Content alternates key, value, key, value, ...
	for i := 0; i < len(body.Content); i += 2 {
		if body.Content[i].Value != "Description" {
			continue
		}
		body.Content[i+1].Value = fmt.Sprintf("%s using AWS Copilot with YAML patches.",
			strings.TrimSuffix(body.Content[i+1].Value, "."))
		break
	}
}
// yamlPatch is a single patch operation from the cfn.patches.yml document.
type yamlPatch struct {
	Operation string `yaml:"op"` // One of "add", "remove", or "replace".
	// Path is in JSON Pointer syntax: https://www.rfc-editor.org/rfc/rfc6901
	Path string `yaml:"path"`
	Value yaml.Node `yaml:"value"` // Value to add/replace; unused for "remove".
}
// applyAdd inserts the patch value at the patch's path, following RFC 6902 "add":
//   - document node: the value replaces the whole document.
//   - mapping node: sets the final key (replacing an existing value, or appending a new key/value pair).
//   - sequence node: inserts at the numeric index, or appends when the final token is "-".
func (p *yamlPatch) applyAdd(root *yaml.Node) error {
	if p.Value.IsZero() {
		return fmt.Errorf("value required")
	}
	pointer := p.pointer()
	parent, err := findNodeWithPointer(root, pointer.parent(), nil)
	if err != nil {
		return err
	}
	switch parent.Kind {
	case yaml.DocumentNode:
		return parent.Encode(p.Value)
	case yaml.MappingNode:
		i, err := findInMap(parent, pointer.finalKey(), pointer.parent())
		if err == nil {
			// if the key is in this map, they are trying to replace it
			return parent.Content[i+1].Encode(p.Value)
		}
		// if the key isn't in this map, then we need to create it for them
		parent.Content = append(parent.Content, &yaml.Node{
			Kind: yaml.ScalarNode,
			Tag: "!!str",
			Value: pointer.finalKey(),
		})
		parent.Content = append(parent.Content, &p.Value)
	case yaml.SequenceNode:
		if pointer.finalKey() == "-" {
			// add to end of sequence
			parent.Content = append(parent.Content, &p.Value)
			return nil
		}
		// maxIdx of len(Content) permits inserting one past the last element.
		idx, err := idxOrError(pointer.finalKey(), len(parent.Content), pointer.parent())
		if err != nil {
			return err
		}
		// add node at idx
		parent.Content = append(parent.Content[:idx], append([]*yaml.Node{&p.Value}, parent.Content[idx:]...)...)
	default:
		return &errInvalidNodeKind{
			pointer: pointer.parent(),
			kind: parent.Kind,
		}
	}
	return nil
}
// applyRemove deletes the node addressed by the patch's path, following RFC 6902 "remove":
//   - document node: the document content is zeroed out.
//   - mapping node: removes the final key and its value (errors if the key is absent).
//   - sequence node: removes the element at the numeric index.
func (p *yamlPatch) applyRemove(root *yaml.Node) error {
	pointer := p.pointer()
	parent, err := findNodeWithPointer(root, pointer.parent(), nil)
	if err != nil {
		return err
	}
	switch parent.Kind {
	case yaml.DocumentNode:
		// make sure we are encoding zero into node
		p.Value = yaml.Node{}
		return parent.Encode(p.Value)
	case yaml.MappingNode:
		i, err := findInMap(parent, pointer.finalKey(), pointer.parent())
		if err != nil {
			return err
		}
		// Drop both the key node at i and its value node at i+1.
		parent.Content = append(parent.Content[:i], parent.Content[i+2:]...)
	case yaml.SequenceNode:
		// maxIdx of len(Content)-1: unlike "add", removal must target an existing element.
		idx, err := idxOrError(pointer.finalKey(), len(parent.Content)-1, pointer.parent())
		if err != nil {
			return err
		}
		parent.Content = append(parent.Content[:idx], parent.Content[idx+1:]...)
	default:
		return &errInvalidNodeKind{
			pointer: pointer.parent(),
			kind: parent.Kind,
		}
	}
	return nil
}
// applyReplace swaps the node addressed by the patch's path with the patch value,
// following RFC 6902 "replace". The target node must already exist, and a value is required.
func (p *yamlPatch) applyReplace(root *yaml.Node) error {
	if p.Value.IsZero() {
		return fmt.Errorf("value required")
	}
	target, err := findNodeWithPointer(root, p.pointer(), nil)
	if err != nil {
		return err
	}
	return target.Encode(p.Value)
}
// pointer is a JSON Pointer (RFC 6901) split into its reference tokens.
type pointer []string

// parent returns a pointer to the parent of p, or nil if p has no tokens.
func (p pointer) parent() pointer {
	if len(p) > 0 {
		return p[:len(p)-1]
	}
	return nil
}

// finalKey returns the last reference token of p, or "" if p has no tokens.
func (p pointer) finalKey() string {
	if len(p) > 0 {
		return p[len(p)-1]
	}
	return ""
}
// pointer splits the patch's Path into JSON Pointer reference tokens and
// decodes the escape sequences defined by https://www.rfc-editor.org/rfc/rfc6901#section-4.
func (y yamlPatch) pointer() pointer {
	parts := strings.Split(y.Path, "/")
	out := make(pointer, len(parts))
	for i, part := range parts {
		// "~1" decodes to "/" and then "~0" to "~", in that order per the RFC.
		part = strings.ReplaceAll(part, "~1", "/")
		out[i] = strings.ReplaceAll(part, "~0", "~")
	}
	return out
}
// findInMap returns the index of the _key_ node in a mapping node's Content.
// The index of the _value_ node is the returned index+1.
//
// If key is not in the map, an error is returned; traversed is only used for the message.
func findInMap(node *yaml.Node, key string, traversed pointer) (int, error) {
	// A mapping node's Content alternates key, value, key, value, ...
	for i := 0; i < len(node.Content); i += 2 {
		if node.Content[i].Value != key {
			continue
		}
		return i, nil
	}
	return 0, fmt.Errorf("key %q: %q not found in map", strings.Join(traversed, jsonPointerSeparator), key)
}
// findNodeWithPointer recursively descends from node following the reference
// tokens in remaining and returns the node the pointer addresses.
// traversed accumulates the consumed tokens and is only used for error messages.
func findNodeWithPointer(node *yaml.Node, remaining, traversed pointer) (*yaml.Node, error) {
	if len(remaining) == 0 {
		return node, nil
	}
	switch node.Kind {
	case yaml.DocumentNode:
		if len(node.Content) == 0 {
			return nil, fmt.Errorf("invalid yaml document node with no content") // shouldn't ever happen
		}
		// The leading empty token (from splitting a path like "/a/b") is consumed here.
		return findNodeWithPointer(node.Content[0], remaining[1:], append(traversed, remaining[0]))
	case yaml.MappingNode:
		i, err := findInMap(node, remaining[0], traversed)
		if err != nil {
			return nil, err
		}
		// Content[i] is the key node; Content[i+1] holds its value.
		return findNodeWithPointer(node.Content[i+1], remaining[1:], append(traversed, remaining[0]))
	case yaml.SequenceNode:
		idx, err := idxOrError(remaining[0], len(node.Content)-1, traversed)
		if err != nil {
			return nil, err
		}
		return findNodeWithPointer(node.Content[idx], remaining[1:], append(traversed, remaining[0]))
	default:
		// Scalars and aliases cannot be indexed into.
		return nil, &errInvalidNodeKind{
			pointer: traversed,
			kind: node.Kind,
		}
	}
}
// idxOrError parses key as a sequence index and validates it is within [0, maxIdx].
// traversed is the pointer path consumed so far and is only used for error messages.
func idxOrError(key string, maxIdx int, traversed pointer) (int, error) {
	idx, err := strconv.Atoi(key)
	if err != nil {
		return 0, fmt.Errorf("key %q: expected index in sequence, got %q", strings.Join(traversed, jsonPointerSeparator), key)
	}
	if idx < 0 || idx > maxIdx {
		return 0, fmt.Errorf("key %q: index %d out of bounds for sequence of length %d", strings.Join(traversed, jsonPointerSeparator), idx, maxIdx)
	}
	return idx, nil
}
// errInvalidNodeKind is returned when a patch path traverses through a node
// that cannot be indexed into (e.g. a scalar or alias node).
type errInvalidNodeKind struct {
	pointer pointer // Tokens traversed up to the offending node.
	kind yaml.Kind
}

// Error implements the error interface.
func (e *errInvalidNodeKind) Error() string {
	return fmt.Sprintf("key %q: invalid node type %s", strings.Join(e.pointer, jsonPointerSeparator), nodeKindStringer(e.kind))
}
// nodeKindStringer renders a yaml.Kind as a human-readable name for error messages.
type nodeKindStringer yaml.Kind

// String implements fmt.Stringer.
func (k nodeKindStringer) String() string {
	names := map[yaml.Kind]string{
		yaml.DocumentNode: "document",
		yaml.SequenceNode: "sequence",
		yaml.MappingNode:  "mapping",
		yaml.ScalarNode:   "scalar",
		yaml.AliasNode:    "alias",
	}
	if name, ok := names[yaml.Kind(k)]; ok {
		return name
	}
	// Fall back to the Go-syntax representation for unknown kinds.
	return fmt.Sprintf("%#v", k)
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package override
import (
"fmt"
"path/filepath"
"strings"
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestScaffoldWithPatch verifies that scaffolding creates the override files
// in an empty directory and refuses to write into a non-empty one.
func TestScaffoldWithPatch(t *testing.T) {
	t.Run("scaffolds files in an empty directory", func(t *testing.T) {
		fsys := afero.NewMemMapFs()
		overridesDir := filepath.Join("copilot", "frontend", "overrides")

		require.NoError(t, ScaffoldWithPatch(fsys, overridesDir))

		exists, _ := afero.Exists(fsys, filepath.Join(overridesDir, "README.md"))
		require.True(t, exists, "README.md should exist")
		exists, _ = afero.Exists(fsys, filepath.Join(overridesDir, yamlPatchFile))
		require.True(t, exists, "cfn.patches.yml should exist")
	})
	t.Run("should return an error if the directory is not empty", func(t *testing.T) {
		fsys := afero.NewMemMapFs()
		overridesDir := filepath.Join("copilot", "frontend", "overrides")
		_ = fsys.MkdirAll(overridesDir, 0755)
		_ = afero.WriteFile(fsys, filepath.Join(overridesDir, "random.txt"), []byte("content"), 0644)

		require.EqualError(t, ScaffoldWithPatch(fsys, overridesDir), fmt.Sprintf("directory %q is not empty", overridesDir))
	})
}
// TestPatch_Override exercises the JSON-patch style override operations
// (add, remove, replace) against YAML templates, covering sequence
// insertion/removal at every position, JSON-pointer character escaping,
// document-root selectors, the empty-string key, and every error path.
func TestPatch_Override(t *testing.T) {
	tests := map[string]struct {
		yaml      string // input CloudFormation template.
		overrides string // contents of the cfn.patches.yml file.

		expected    string // resulting template, compared semantically after unmarshaling.
		expectedErr string // exact error message when the patch should fail.
	}{
		"add to map": {
			yaml: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition`,
			overrides: `
- op: add
  path: /Resources/TaskDef
  value:
    Properties:
      Prop1: value
      Prop2: false`,
			expected: `
Resources:
  TaskDef:
    Properties:
      Prop1: value
      Prop2: false`,
		},
		"add to map in pointer": {
			yaml: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition`,
			overrides: `
- op: add
  path: /Resources/TaskDef/Properties
  value:
    Prop1: value
    Prop2: false`,
			expected: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Prop1: value
      Prop2: false`,
		},
		"add to beginning sequence": {
			yaml: `
Resources:
  TaskDef:
    List:
      - asdf
      - jkl;`,
			overrides: `
- op: add
  path: /Resources/TaskDef/List/0
  value: qwerty`,
			expected: `
Resources:
  TaskDef:
    List:
      - qwerty
      - asdf
      - jkl;`,
		},
		"add to middle sequence": {
			yaml: `
Resources:
  TaskDef:
    List:
      - asdf
      - jkl;`,
			overrides: `
- op: add
  path: /Resources/TaskDef/List/1
  value: qwerty`,
			expected: `
Resources:
  TaskDef:
    List:
      - asdf
      - qwerty
      - jkl;`,
		},
		"add to end sequence by index": {
			yaml: `
Resources:
  TaskDef:
    List:
      - asdf
      - jkl;`,
			overrides: `
- op: add
  path: /Resources/TaskDef/List/2
  value: qwerty`,
			expected: `
Resources:
  TaskDef:
    List:
      - asdf
      - jkl;
      - qwerty`,
		},
		// "-" is the JSON-pointer token for "append after the last element".
		"add to end sequence with -": {
			yaml: `
Resources:
  IAMRole:
    Type: AWS::IAM::Role
    Properties:
      Policies:
        - PolicyName: "Test"
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - "*"
                  - "**"
                Resource:
                  - "*"`,
			overrides: `
- op: add
  path: /Resources/IAMRole/Properties/Policies/0/PolicyDocument/Statement/0/Action/-
  value:
    key: value
    key2: value2`,
			expected: `
Resources:
  IAMRole:
    Type: AWS::IAM::Role
    Properties:
      Policies:
        - PolicyName: "Test"
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - "*"
                  - "**"
                  - key: value
                    key2: value2
                Resource:
                  - "*"`,
		},
		"remove scalar from map": {
			yaml: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition
    Description: asdf`,
			overrides: `
- op: remove
  path: /Resources/TaskDef/Description`,
			expected: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition`,
		},
		"remove map from map": {
			yaml: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Prop1: value
      Prop2: value`,
			overrides: `
- op: remove
  path: /Resources/TaskDef/Properties`,
			expected: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition`,
		},
		"remove from beginning of sequence": {
			yaml: `
Resources:
  - obj1: value
    list:
      - item0
      - item1
  - obj2: value
    list:
      - item0
      - item1`,
			overrides: `
- op: remove
  path: /Resources/1/list/0`,
			expected: `
Resources:
  - obj1: value
    list:
      - item0
      - item1
  - obj2: value
    list:
      - item1`,
		},
		"remove from middle of sequence": {
			yaml: `
Resources:
  - obj1: value
    list:
      - item0
      - item1
  - obj2: value
    list:
      - item0
      - item1
      - item2`,
			overrides: `
- op: remove
  path: /Resources/1/list/1`,
			expected: `
Resources:
  - obj1: value
    list:
      - item0
      - item1
  - obj2: value
    list:
      - item0
      - item2`,
		},
		"remove from end of sequence": {
			yaml: `
Resources:
  - obj1: value
    list:
      - item0
      - item1
  - obj2: value
    list:
      - item0
      - item1
      - item2`,
			overrides: `
- op: remove
  path: /Resources/1/list/2`,
			expected: `
Resources:
  - obj1: value
    list:
      - item0
      - item1
  - obj2: value
    list:
      - item0
      - item1`,
		},
		"replace scalar with scalar": {
			yaml: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition
    Description: asdf`,
			overrides: `
- op: replace
  path: /Resources/TaskDef/Description
  value: jkl;`,
			expected: `
Resources:
  TaskDef:
    Type: AWS::ECS::TaskDefinition
    Description: jkl;`,
		},
		"replace map with scalar": {
			yaml: `
Resources:
  List:
    - asdf
    - key: value
      key2: value2
    - - very list
      - many item`,
			overrides: `
- op: replace
  path: /Resources/List/1
  value: jkl;`,
			expected: `
Resources:
  List:
    - asdf
    - jkl;
    - - very list
      - many item`,
		},
		// ~0 escapes "~" and ~1 escapes "/" per RFC 6901.
		"works with special characters": {
			yaml: `
Resources:
  key:
    key~with/weirdchars/: old`,
			overrides: `
- op: replace
  path: /Resources/key/key~0with~1weirdchars~1
  value: new`,
			expected: `
Resources:
  key:
    key~with/weirdchars/: new`,
		},
		// An empty path ("") selects the whole document.
		"add works with doc selector": {
			yaml: `
Resources:
  key: value`,
			overrides: `
- op: add
  path: ""
  value:
    a: aaa
    b: bbb`,
			expected: `
a: aaa
b: bbb`,
		},
		"replace works with doc selector": {
			yaml: `
Resources:
  key: value`,
			overrides: `
- op: replace
  path: ""
  value:
    a: aaa
    b: bbb`,
			expected: `
a: aaa
b: bbb`,
		},
		"remove works with doc selector": {
			yaml: `
Resources:
  key: value`,
			overrides: `
- op: remove
  path: ""`,
			expected: ``,
		},
		// A path of "/" addresses the empty-string key "" (RFC 6901).
		"empty string key works": {
			yaml: `
key: asdf
"": old`,
			overrides: `
- op: replace
  path: /
  value: new`,
			expected: `
key: asdf
"": new`,
		},
		"nothing happens with empty patch file": {
			yaml: `
a:
  b: value`,
			expected: `
a:
  b: value`,
		},
		"error on invalid patch file format": {
			overrides: `
op: add
path: /
value: new`,
			expectedErr: `file at "/cfn.patches.yml" does not conform to the YAML patch document schema: yaml: unmarshal errors:
  line 1: cannot unmarshal !!map into []override.yamlPatch`,
		},
		"error on unsupported operation": {
			overrides: `
- op: unsupported
  path: /
  value: new`,
			expectedErr: `unsupported operation "unsupported": supported operations are "add", "remove", and "replace".`,
		},
		"error in map following path": {
			yaml: `
a:
  b:
    - c
    - d`,
			overrides: `
- op: replace
  path: /a/e/c
  value: val`,
			expectedErr: `unable to apply the "replace" patch at index 0: key "/a": "e" not found in map`,
		},
		"error out of bounds sequence following path": {
			yaml: `
a:
  b:
    - c
    - d`,
			overrides: `
- op: add
  path: /a/b/3
  value: val`,
			expectedErr: `unable to apply the "add" patch at index 0: key "/a/b": index 3 out of bounds for sequence of length 2`,
		},
		"error invalid index sequence following path": {
			yaml: `
a:
  b:
    - c
    - d`,
			overrides: `
- op: add
  path: /a/b/e
  value: val`,
			expectedErr: `unable to apply the "add" patch at index 0: key "/a/b": expected index in sequence, got "e"`,
		},
		// "-" is only valid as the final segment of an add path.
		"error invalid index sequence - in middle of path": {
			yaml: `
a:
  b:
    - key: abcd
    - key: efgh`,
			overrides: `
- op: add
  path: /a/b/-/key
  value: val`,
			expectedErr: `unable to apply the "add" patch at index 0: key "/a/b": expected index in sequence, got "-"`,
		},
		"error targeting scalar while following path add": {
			yaml: `
a:
  b:
    - c
    - d`,
			overrides: `
- op: add
  path: /a/b/1/e
  value: val`,
			expectedErr: `unable to apply the "add" patch at index 0: key "/a/b/1": invalid node type scalar`,
		},
		"error targeting scalar while following path remove": {
			yaml: `
a:
  b:
    - c
    - d`,
			overrides: `
- op: remove
  path: /a/b/1/e`,
			expectedErr: `unable to apply the "remove" patch at index 0: key "/a/b/1": invalid node type scalar`,
		},
		"error targeting scalar while following path replace": {
			yaml: `
a:
  b:
    - c
    - d`,
			overrides: `
- op: replace
  path: /a/b/1/e
  value: val`,
			expectedErr: `unable to apply the "replace" patch at index 0: key "/a/b/1": invalid node type scalar`,
		},
		"error add with no value": {
			overrides: `
- op: add
  path: /a/b/c`,
			expectedErr: `unable to apply the "add" patch at index 0: value required`,
		},
		"error replace with no value": {
			overrides: `
- op: replace
  path: /a/b/c`,
			expectedErr: `unable to apply the "replace" patch at index 0: value required`,
		},
		"error remove nonexistant value from map": {
			yaml: `
a:
  b: value`,
			overrides: `
- op: remove
  path: /a/c`,
			expectedErr: `unable to apply the "remove" patch at index 0: key "/a": "c" not found in map`,
		},
		"error patch index incrememts": {
			yaml: `
a:
  b: value`,
			overrides: `
- op: remove
  path: /a/b
- op: remove
  path: /a/c`,
			expectedErr: `unable to apply the "remove" patch at index 1: key "/a": "c" not found in map`,
		},
		"updates the Description field of a CloudFormation template with YAML patch metrics": {
			yaml: `
Description: "CloudFormation template that represents a backend service on Amazon ECS."
Resources:
  key: value`,
			overrides: `
- op: replace
  path: /Resources/key
  value: other`,
			expected: `
Description: "CloudFormation template that represents a backend service on Amazon ECS using AWS Copilot with YAML patches."
Resources:
  key: other`,
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			// Write the overrides into an in-memory cfn.patches.yml at "/".
			fs := afero.NewMemMapFs()
			file, err := fs.Create("/" + yamlPatchFile)
			require.NoError(t, err)
			_, err = file.WriteString(strings.TrimSpace(tc.overrides))
			require.NoError(t, err)

			p := WithPatch("/", PatchOpts{
				FS: fs,
			})

			out, err := p.Override([]byte(strings.TrimSpace(tc.yaml)))
			if tc.expectedErr != "" {
				require.EqualError(t, err, tc.expectedErr)
				return
			}
			require.NoError(t, err)

			// convert for better comparison output
			// limitation: doesn't test for comments sticking around
			var expected interface{}
			var actual interface{}
			require.NoError(t, yaml.Unmarshal([]byte(tc.expected), &expected))
			require.NoError(t, yaml.Unmarshal(out, &actual))

			require.Equal(t, expected, actual)
		})
	}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package repository provides support for building and pushing images to a repository.
package repository
import (
"context"
"fmt"
"io"
"github.com/aws/copilot-cli/internal/pkg/exec"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
)
// ContainerLoginBuildPusher provides support for logging in to repositories, building images and pushing images to repositories.
type ContainerLoginBuildPusher interface {
	// Build builds a docker image from the given arguments, streaming build output to w.
	Build(ctx context.Context, args *dockerengine.BuildArguments, w io.Writer) error
	// Login authenticates the docker client against the repository at uri.
	Login(uri, username, password string) error
	// Push pushes the image at uri with the given tags and returns the image digest.
	Push(ctx context.Context, uri string, w io.Writer, tags ...string) (digest string, err error)
	// IsEcrCredentialHelperEnabled reports whether the docker credential store
	// is configured to use the ECR credential helper for uri.
	IsEcrCredentialHelperEnabled(uri string) bool
}
// Registry gets information of repositories.
type Registry interface {
	// RepositoryURI returns the URI of the repository with the given name.
	RepositoryURI(name string) (string, error)
	// Auth returns the username and password used to authenticate with the registry.
	Auth() (string, string, error)
}
// Repository builds and pushes images to a repository.
type Repository struct {
	name     string                    // Name of the repository.
	registry Registry                  // Used to resolve the repository URI and fetch auth credentials.
	uri      string                    // Cached repository URI; resolved lazily from registry when empty.
	docker   ContainerLoginBuildPusher // Docker client used to login, build, and push.
}
// New instantiates a new Repository.
// The repository URI is left unset and resolved lazily on first use.
func New(registry Registry, name string) *Repository {
	repo := new(Repository)
	repo.name = name
	repo.registry = registry
	repo.docker = dockerengine.New(exec.NewCmd())
	return repo
}
// NewWithURI instantiates a new Repository with uri being set.
func NewWithURI(registry Registry, name, uri string) *Repository {
	repo := new(Repository)
	repo.name = name
	repo.registry = registry
	repo.uri = uri
	repo.docker = dockerengine.New(exec.NewCmd())
	return repo
}
// BuildAndPush builds the image from Dockerfile and pushes it to the repository with tags.
// It returns the digest of the pushed image.
func (r *Repository) BuildAndPush(ctx context.Context, args *dockerengine.BuildArguments, w io.Writer) (digest string, err error) {
	// Resolve the repository URI lazily when the caller did not provide one.
	if args.URI == "" {
		if args.URI, err = r.repositoryURI(); err != nil {
			return "", err
		}
	}
	if err = r.docker.Build(ctx, args, w); err != nil {
		return "", fmt.Errorf("build Dockerfile at %s: %w", args.Dockerfile, err)
	}
	if digest, err = r.docker.Push(ctx, args.URI, w, args.Tags...); err != nil {
		return "", fmt.Errorf("push to repo %s: %w", r.name, err)
	}
	return digest, nil
}
// repositoryURI returns the uri of the repository,
// resolving it through the registry and caching it on first use.
func (r *Repository) repositoryURI() (string, error) {
	if r.uri == "" {
		uri, err := r.registry.RepositoryURI(r.name)
		if err != nil {
			return "", fmt.Errorf("get repository URI: %w", err)
		}
		r.uri = uri
	}
	return r.uri, nil
}
// Login authenticates with a ECR registry by performing a Docker login,
// but only if the `credStore` attribute value is not set to `ecr-login`.
// If the `credStore` value is `ecr-login`, no login is performed.
// Returns uri of the repository or an error, if any occurs during the login process.
func (r *Repository) Login() (string, error) {
	uri, err := r.repositoryURI()
	if err != nil {
		return "", fmt.Errorf("retrieve URI for repository: %w", err)
	}
	if r.docker.IsEcrCredentialHelperEnabled(uri) {
		// The ECR credential helper authenticates transparently;
		// no explicit docker login is required.
		return uri, nil
	}
	username, password, err := r.registry.Auth()
	if err != nil {
		return "", fmt.Errorf("get auth: %w", err)
	}
	if err := r.docker.Login(uri, username, password); err != nil {
		return "", fmt.Errorf("docker login %s: %w", uri, err)
	}
	return uri, nil
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package repository
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"testing"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/aws/copilot-cli/internal/pkg/repository/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
// TestRepository_BuildAndPush validates URI resolution, build failures, push
// failures, and the happy path of Repository.BuildAndPush using mocked
// registry and docker clients.
func TestRepository_BuildAndPush(t *testing.T) {
	inRepoName := "my-repo"
	inDockerfilePath := "path/to/dockerfile"
	mockTag1, mockTag2, mockTag3 := "tag1", "tag2", "tag3"
	mockRepoURI := "mockRepoURI"
	ctx := context.Background()
	defaultDockerArguments := dockerengine.BuildArguments{
		URI:        mockRepoURI,
		Dockerfile: inDockerfilePath,
		Context:    filepath.Dir(inDockerfilePath),
		Tags:       []string{mockTag1, mockTag2, mockTag3},
	}
	testCases := map[string]struct {
		inURI        string
		inMockDocker func(m *mocks.MockContainerLoginBuildPusher)
		mockRegistry func(m *mocks.MockRegistry)

		wantedError  error
		wantedDigest string
	}{
		"failed to get repo URI": {
			mockRegistry: func(m *mocks.MockRegistry) {
				m.EXPECT().RepositoryURI(inRepoName).Return("", errors.New("some error"))
			},
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {},
			wantedError:  errors.New("get repository URI: some error"),
		},
		"failed to build image": {
			inURI: defaultDockerArguments.URI,
			mockRegistry: func(m *mocks.MockRegistry) {
				m.EXPECT().Auth().Return("", "", nil).AnyTimes()
			},
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().Build(ctx, &defaultDockerArguments, gomock.Any()).Return(errors.New("error building image"))
				// Push must never be attempted when the build fails.
				m.EXPECT().Push(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
			},
			wantedError: fmt.Errorf("build Dockerfile at %s: error building image", inDockerfilePath),
		},
		"failed to push": {
			inURI: defaultDockerArguments.URI,
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().Build(ctx, &defaultDockerArguments, gomock.Any()).Times(1)
				m.EXPECT().Push(ctx, mockRepoURI, gomock.Any(), mockTag1, mockTag2, mockTag3).Return("", errors.New("error pushing image"))
			},
			wantedError: errors.New("push to repo my-repo: error pushing image"),
		},
		"push with ecr-login": {
			inURI: defaultDockerArguments.URI,
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().Build(ctx, &defaultDockerArguments, gomock.Any()).Return(nil).Times(1)
				m.EXPECT().Push(ctx, mockRepoURI, gomock.Any(), mockTag1, mockTag2, mockTag3).Return("sha256:f1d4ae3f7261a72e98c6ebefe9985cf10a0ea5bd762585a43e0700ed99863807", nil)
			},
			wantedDigest: "sha256:f1d4ae3f7261a72e98c6ebefe9985cf10a0ea5bd762585a43e0700ed99863807",
		},
		"success": {
			mockRegistry: func(m *mocks.MockRegistry) {
				m.EXPECT().RepositoryURI(inRepoName).Return(defaultDockerArguments.URI, nil)
			},
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().Build(ctx, &defaultDockerArguments, gomock.Any()).Return(nil).Times(1)
				m.EXPECT().Push(ctx, mockRepoURI, gomock.Any(), mockTag1, mockTag2, mockTag3).Return("sha256:f1d4ae3f7261a72e98c6ebefe9985cf10a0ea5bd762585a43e0700ed99863807", nil)
			},
			wantedDigest: "sha256:f1d4ae3f7261a72e98c6ebefe9985cf10a0ea5bd762585a43e0700ed99863807",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			mockRepoGetter := mocks.NewMockRegistry(ctrl)
			mockDocker := mocks.NewMockContainerLoginBuildPusher(ctrl)
			if tc.mockRegistry != nil {
				tc.mockRegistry(mockRepoGetter)
			}
			if tc.inMockDocker != nil {
				tc.inMockDocker(mockDocker)
			}
			repo := &Repository{
				name:     inRepoName,
				registry: mockRepoGetter,
				uri:      tc.inURI,
				docker:   mockDocker,
			}
			buf := new(strings.Builder)
			digest, err := repo.BuildAndPush(ctx, &dockerengine.BuildArguments{
				Dockerfile: inDockerfilePath,
				Context:    filepath.Dir(inDockerfilePath),
				Tags:       []string{mockTag1, mockTag2, mockTag3},
			}, buf)
			if tc.wantedError != nil {
				// Fixed argument order: require.EqualError takes the actual
				// error first and the expected message second. The previous
				// call passed (wantedError, err.Error()), which would panic
				// with a nil dereference instead of failing cleanly if err
				// were unexpectedly nil.
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedDigest, digest)
			}
		})
	}
}
// Test_Login validates Repository.Login's auth-retrieval failure, docker
// login failure, and successful login flows using mocked clients.
func Test_Login(t *testing.T) {
	const mockRepoURI = "mockRepoURI"

	testCases := map[string]struct {
		inMockDocker func(m *mocks.MockContainerLoginBuildPusher)
		mockRegistry func(m *mocks.MockRegistry)

		wantedURI   string
		wantedError error
	}{
		"failed to get auth": {
			mockRegistry: func(m *mocks.MockRegistry) {
				m.EXPECT().Auth().Return("", "", errors.New("error getting auth"))
			},
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().IsEcrCredentialHelperEnabled("mockRepoURI").Return(false)
				// Login must never be attempted without credentials.
				m.EXPECT().Login(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
			},
			wantedError: errors.New("get auth: error getting auth"),
		},
		"failed to login": {
			mockRegistry: func(m *mocks.MockRegistry) {
				m.EXPECT().Auth().Return("my-name", "my-pwd", nil)
			},
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().IsEcrCredentialHelperEnabled("mockRepoURI").Return(false)
				m.EXPECT().Login("mockRepoURI", "my-name", "my-pwd").Return(errors.New("error logging in"))
			},
			wantedError: fmt.Errorf("docker login %s: error logging in", mockRepoURI),
		},
		"no error when performing login": {
			mockRegistry: func(m *mocks.MockRegistry) {
				m.EXPECT().Auth().Return("my-name", "my-pwd", nil)
			},
			inMockDocker: func(m *mocks.MockContainerLoginBuildPusher) {
				m.EXPECT().IsEcrCredentialHelperEnabled("mockRepoURI").Return(false)
				m.EXPECT().Login("mockRepoURI", "my-name", "my-pwd").Return(nil)
			},
			wantedURI: mockRepoURI,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			mockRepoGetter := mocks.NewMockRegistry(ctrl)
			mockDocker := mocks.NewMockContainerLoginBuildPusher(ctrl)
			if tc.mockRegistry != nil {
				tc.mockRegistry(mockRepoGetter)
			}
			if tc.inMockDocker != nil {
				tc.inMockDocker(mockDocker)
			}
			repo := &Repository{
				registry: mockRepoGetter,
				uri:      mockRepoURI,
				docker:   mockDocker,
			}
			gotURI, gotErr := repo.Login()

			if tc.wantedError != nil {
				// Fixed argument order: require.EqualError takes the actual
				// error first and the expected message second. The previous
				// call passed (wantedError, gotErr.Error()), which would
				// panic with a nil dereference if gotErr were unexpectedly
				// nil instead of reporting a clean test failure.
				require.EqualError(t, gotErr, tc.wantedError.Error())
			} else {
				require.NoError(t, gotErr)
				require.Equal(t, tc.wantedURI, gotURI)
			}
		})
	}
}
// Code generated by MockGen. DO NOT EDIT.
// Source: ./internal/pkg/repository/repository.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
io "io"
reflect "reflect"
dockerengine "github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
gomock "github.com/golang/mock/gomock"
)
// MockContainerLoginBuildPusher is a mock of ContainerLoginBuildPusher interface.
// NOTE(review): this file is generated by MockGen — regenerate with `mockgen`
// (see the Source comment at the top of the file) rather than editing by hand.
type MockContainerLoginBuildPusher struct {
	ctrl     *gomock.Controller
	recorder *MockContainerLoginBuildPusherMockRecorder
}

// MockContainerLoginBuildPusherMockRecorder is the mock recorder for MockContainerLoginBuildPusher.
type MockContainerLoginBuildPusherMockRecorder struct {
	mock *MockContainerLoginBuildPusher
}

// NewMockContainerLoginBuildPusher creates a new mock instance.
func NewMockContainerLoginBuildPusher(ctrl *gomock.Controller) *MockContainerLoginBuildPusher {
	mock := &MockContainerLoginBuildPusher{ctrl: ctrl}
	mock.recorder = &MockContainerLoginBuildPusherMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockContainerLoginBuildPusher) EXPECT() *MockContainerLoginBuildPusherMockRecorder {
	return m.recorder
}

// Build mocks base method.
func (m *MockContainerLoginBuildPusher) Build(ctx context.Context, args *dockerengine.BuildArguments, w io.Writer) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Build", ctx, args, w)
	ret0, _ := ret[0].(error)
	return ret0
}

// Build indicates an expected call of Build.
func (mr *MockContainerLoginBuildPusherMockRecorder) Build(ctx, args, w interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Build", reflect.TypeOf((*MockContainerLoginBuildPusher)(nil).Build), ctx, args, w)
}

// IsEcrCredentialHelperEnabled mocks base method.
func (m *MockContainerLoginBuildPusher) IsEcrCredentialHelperEnabled(uri string) bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsEcrCredentialHelperEnabled", uri)
	ret0, _ := ret[0].(bool)
	return ret0
}

// IsEcrCredentialHelperEnabled indicates an expected call of IsEcrCredentialHelperEnabled.
func (mr *MockContainerLoginBuildPusherMockRecorder) IsEcrCredentialHelperEnabled(uri interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsEcrCredentialHelperEnabled", reflect.TypeOf((*MockContainerLoginBuildPusher)(nil).IsEcrCredentialHelperEnabled), uri)
}

// Login mocks base method.
func (m *MockContainerLoginBuildPusher) Login(uri, username, password string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Login", uri, username, password)
	ret0, _ := ret[0].(error)
	return ret0
}

// Login indicates an expected call of Login.
func (mr *MockContainerLoginBuildPusherMockRecorder) Login(uri, username, password interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Login", reflect.TypeOf((*MockContainerLoginBuildPusher)(nil).Login), uri, username, password)
}

// Push mocks base method.
func (m *MockContainerLoginBuildPusher) Push(ctx context.Context, uri string, w io.Writer, tags ...string) (string, error) {
	m.ctrl.T.Helper()
	// Variadic tags are flattened into the gomock call arguments.
	varargs := []interface{}{ctx, uri, w}
	for _, a := range tags {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "Push", varargs...)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Push indicates an expected call of Push.
func (mr *MockContainerLoginBuildPusherMockRecorder) Push(ctx, uri, w interface{}, tags ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, uri, w}, tags...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockContainerLoginBuildPusher)(nil).Push), varargs...)
}

// MockRegistry is a mock of Registry interface.
// NOTE(review): generated by MockGen — regenerate rather than hand-edit.
type MockRegistry struct {
	ctrl     *gomock.Controller
	recorder *MockRegistryMockRecorder
}

// MockRegistryMockRecorder is the mock recorder for MockRegistry.
type MockRegistryMockRecorder struct {
	mock *MockRegistry
}

// NewMockRegistry creates a new mock instance.
func NewMockRegistry(ctrl *gomock.Controller) *MockRegistry {
	mock := &MockRegistry{ctrl: ctrl}
	mock.recorder = &MockRegistryMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRegistry) EXPECT() *MockRegistryMockRecorder {
	return m.recorder
}

// Auth mocks base method.
func (m *MockRegistry) Auth() (string, string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Auth")
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(string)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// Auth indicates an expected call of Auth.
func (mr *MockRegistryMockRecorder) Auth() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Auth", reflect.TypeOf((*MockRegistry)(nil).Auth))
}

// RepositoryURI mocks base method.
func (m *MockRegistry) RepositoryURI(name string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RepositoryURI", name)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// RepositoryURI indicates an expected call of RepositoryURI.
func (mr *MockRegistryMockRecorder) RepositoryURI(name interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RepositoryURI", reflect.TypeOf((*MockRegistry)(nil).RepositoryURI), name)
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package jobrunner provides support for invoking jobs.
package jobrunner
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
)
// StateMachineExecutor is the interface that implements the Execute method to invoke a state machine.
type StateMachineExecutor interface {
	// Execute starts an execution of the state machine with the given ARN.
	Execute(stateMachineARN string) error
}
// CFNStackResourceLister is the interface to list CloudFormation stack resources.
type CFNStackResourceLister interface {
	// StackResources returns the resources that belong to the stack with the given name.
	StackResources(name string) ([]*cloudformation.StackResource, error)
}
// JobRunner can invoke a job.
type JobRunner struct {
	app string // Name of the application the job belongs to.
	env string // Name of the environment the job is deployed in.
	job string // Name of the job to invoke.

	cfn          CFNStackResourceLister // Used to find the job's state machine in its CloudFormation stack.
	stateMachine StateMachineExecutor   // Used to start an execution of that state machine.
}
// Config holds the data needed to create a JobRunner.
type Config struct {
	App string // Name of the application.
	Env string // Name of the environment.
	Job string // Name of the job.

	// Dependencies to invoke a job.
	CFN          CFNStackResourceLister // CloudFormation client to list stack resources.
	StateMachine StateMachineExecutor   // StepFunction client to execute a state machine.
}
// New creates a new JobRunner from the given configuration.
func New(cfg *Config) *JobRunner {
	runner := new(JobRunner)
	runner.app = cfg.App
	runner.env = cfg.Env
	runner.job = cfg.Job
	runner.cfn = cfg.CFN
	runner.stateMachine = cfg.StateMachine
	return runner
}
// Run invokes a job.
// An error is returned if the state machine's ARN can not be derived from the job, or the execution fails.
func (job *JobRunner) Run() error {
	// Compute the stack name once instead of twice.
	stackName := stack.NameForWorkload(job.app, job.env, job.job)
	resources, err := job.cfn.StackResources(stackName)
	if err != nil {
		// Wrap with %w (instead of %v) so callers can unwrap the underlying
		// error with errors.Is/errors.As; the message is unchanged.
		return fmt.Errorf("describe stack %q: %w", stackName, err)
	}
	// The job's state machine is the first (and only) StepFunctions resource
	// in the workload stack.
	var arn string
	for _, resource := range resources {
		if aws.StringValue(resource.ResourceType) == "AWS::StepFunctions::StateMachine" {
			arn = aws.StringValue(resource.PhysicalResourceId)
			break
		}
	}
	if arn == "" {
		return fmt.Errorf("state machine for job %q is not found in environment %q and application %q", job.job, job.env, job.app)
	}
	if err := job.stateMachine.Execute(arn); err != nil {
		return fmt.Errorf("execute state machine %q: %w", arn, err)
	}
	return nil
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package jobrunner
import (
"errors"
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/runner/jobrunner/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
// TestJobRunner_Run covers JobRunner.Run's three failure modes (stack lookup
// failure, missing state-machine resource, execution failure) plus the happy
// path, using mocked CloudFormation and Step Functions clients.
func TestJobRunner_Run(t *testing.T) {
	testCases := map[string]struct {
		MockExecutor func(m *mocks.MockStateMachineExecutor)
		App          string
		Env          string
		Job          string
		MockCFN      func(m *mocks.MockCFNStackResourceLister)

		wantedError error
	}{
		"missing stack": {
			MockExecutor: func(m *mocks.MockStateMachineExecutor) {
				m.EXPECT().Execute("arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job").Return(nil).AnyTimes()
			},
			App: "appname",
			Env: "envname",
			Job: "jobname",
			MockCFN: func(m *mocks.MockCFNStackResourceLister) {
				m.EXPECT().StackResources("appname-envname-jobname").Return(nil, fmt.Errorf("Missing Stack Resource"))
			},
			wantedError: fmt.Errorf(`describe stack "appname-envname-jobname": Missing Stack Resource`),
		},
		"missing statemachine resource": {
			MockExecutor: func(m *mocks.MockStateMachineExecutor) {
				m.EXPECT().Execute("arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job").Return(nil).AnyTimes()
			},
			App: "appname",
			Env: "envname",
			Job: "jobname",
			MockCFN: func(m *mocks.MockCFNStackResourceLister) {
				// The stack exists but contains no StepFunctions resource.
				m.EXPECT().StackResources("appname-envname-jobname").Return([]*cloudformation.StackResource{
					{
						ResourceType: aws.String("AWS::Lambda::Function"),
					},
				}, nil)
			},
			wantedError: errors.New(`state machine for job "jobname" is not found in environment "envname" and application "appname"`),
		},
		"failed statemachine execution": {
			MockExecutor: func(m *mocks.MockStateMachineExecutor) {
				m.EXPECT().Execute("arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job").Return(fmt.Errorf("ExecutionLimitExceeded"))
			},
			App: "appname",
			Env: "envname",
			Job: "jobname",
			MockCFN: func(m *mocks.MockCFNStackResourceLister) {
				m.EXPECT().StackResources("appname-envname-jobname").Return([]*cloudformation.StackResource{
					{
						ResourceType:       aws.String("AWS::StepFunctions::StateMachine"),
						PhysicalResourceId: aws.String("arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job"),
					},
				}, nil)
			},
			wantedError: fmt.Errorf(`execute state machine "arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job": ExecutionLimitExceeded`),
		},
		"run success": {
			MockExecutor: func(m *mocks.MockStateMachineExecutor) {
				m.EXPECT().Execute("arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job").Return(nil)
			},
			App: "appname",
			Env: "envname",
			Job: "jobname",
			MockCFN: func(m *mocks.MockCFNStackResourceLister) {
				m.EXPECT().StackResources("appname-envname-jobname").Return([]*cloudformation.StackResource{
					{
						ResourceType:       aws.String("AWS::StepFunctions::StateMachine"),
						PhysicalResourceId: aws.String("arn:aws:states:us-east-1:111111111111:stateMachine:app-env-job"),
					},
				}, nil)
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			cfn := mocks.NewMockCFNStackResourceLister(ctrl)
			sfn := mocks.NewMockStateMachineExecutor(ctrl)
			tc.MockCFN(cfn)
			tc.MockExecutor(sfn)
			jobRunner := JobRunner{
				stateMachine: sfn,
				app:          tc.App,
				env:          tc.Env,
				job:          tc.Job,
				cfn:          cfn,
			}

			err := jobRunner.Run()
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// Code generated by MockGen. DO NOT EDIT.
// Source: ./internal/pkg/runner/jobrunner/jobrunner.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
cloudformation "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
gomock "github.com/golang/mock/gomock"
)
// MockStateMachineExecutor is a mock of StateMachineExecutor interface.
// NOTE(review): this file is generated by MockGen (see file header); if the
// source interface changes, regenerate with `mockgen` rather than hand-editing.
type MockStateMachineExecutor struct {
	ctrl     *gomock.Controller
	recorder *MockStateMachineExecutorMockRecorder
}
// MockStateMachineExecutorMockRecorder is the mock recorder for MockStateMachineExecutor.
type MockStateMachineExecutorMockRecorder struct {
	mock *MockStateMachineExecutor
}
// NewMockStateMachineExecutor creates a new mock instance.
func NewMockStateMachineExecutor(ctrl *gomock.Controller) *MockStateMachineExecutor {
	mock := &MockStateMachineExecutor{ctrl: ctrl}
	mock.recorder = &MockStateMachineExecutorMockRecorder{mock}
	return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStateMachineExecutor) EXPECT() *MockStateMachineExecutorMockRecorder {
	return m.recorder
}
// Execute mocks base method.
func (m *MockStateMachineExecutor) Execute(stateMachineARN string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Execute", stateMachineARN)
	ret0, _ := ret[0].(error)
	return ret0
}
// Execute indicates an expected call of Execute.
func (mr *MockStateMachineExecutorMockRecorder) Execute(stateMachineARN interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockStateMachineExecutor)(nil).Execute), stateMachineARN)
}
// MockCFNStackResourceLister is a mock of CFNStackResourceLister interface.
// NOTE(review): generated by MockGen; regenerate instead of hand-editing.
type MockCFNStackResourceLister struct {
	ctrl     *gomock.Controller
	recorder *MockCFNStackResourceListerMockRecorder
}
// MockCFNStackResourceListerMockRecorder is the mock recorder for MockCFNStackResourceLister.
type MockCFNStackResourceListerMockRecorder struct {
	mock *MockCFNStackResourceLister
}
// NewMockCFNStackResourceLister creates a new mock instance.
func NewMockCFNStackResourceLister(ctrl *gomock.Controller) *MockCFNStackResourceLister {
	mock := &MockCFNStackResourceLister{ctrl: ctrl}
	mock.recorder = &MockCFNStackResourceListerMockRecorder{mock}
	return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCFNStackResourceLister) EXPECT() *MockCFNStackResourceListerMockRecorder {
	return m.recorder
}
// StackResources mocks base method.
func (m *MockCFNStackResourceLister) StackResources(name string) ([]*cloudformation.StackResource, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StackResources", name)
	ret0, _ := ret[0].([]*cloudformation.StackResource)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// StackResources indicates an expected call of StackResources.
func (mr *MockCFNStackResourceListerMockRecorder) StackResources(name interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StackResources", reflect.TypeOf((*MockCFNStackResourceLister)(nil).StackResources), name)
}
| 88 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package s3 provides a client to retrieve Copilot S3 information.
package s3
import (
"fmt"
"sort"
"strings"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/resourcegroups"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/aws/copilot-cli/internal/pkg/deploy"
)
const (
bucketType = "s3:bucket"
)
// resourceGetter is the subset of the Resource Groups API the Client needs.
// Declared on the consumer side so it can be mocked in unit tests.
type resourceGetter interface {
	GetResourcesByTags(resourceType string, tags map[string]string) ([]*resourcegroups.Resource, error)
}
// Client retrieves Copilot S3 service information from AWS.
type Client struct {
	rgGetter resourceGetter // tag-based resource lookup, backed by AWS Resource Groups.
}
// New inits a new Client.
// The given session determines the account and region the client queries.
func New(sess *session.Session) *Client {
	return &Client{
		rgGetter: resourcegroups.New(sess),
	}
}
// BucketName returns the bucket name given the Copilot app, env, and Static Site service name.
// The bucket is located via its Copilot resource tags; exactly one match must exist.
func (c Client) BucketName(app, env, svc string) (string, error) {
	searchTags := tags(map[string]string{
		deploy.AppTagKey:     app,
		deploy.EnvTagKey:     env,
		deploy.ServiceTagKey: svc,
	})
	resources, err := c.rgGetter.GetResourcesByTags(bucketType, searchTags)
	if err != nil {
		return "", fmt.Errorf("get S3 bucket with tags %s: %w", searchTags.String(), err)
	}
	switch len(resources) {
	case 0:
		return "", &ErrNotFound{searchTags}
	case 1:
		bucketName, _, parseErr := s3.ParseARN(resources[0].ARN)
		if parseErr != nil {
			return "", fmt.Errorf("parse ARN %s: %w", resources[0].ARN, parseErr)
		}
		return bucketName, nil
	default:
		return "", fmt.Errorf("more than one S3 bucket with tags %s", searchTags.String())
	}
}
type tags map[string]string
func (tags tags) String() string {
serialized := make([]string, len(tags))
var i = 0
for k, v := range tags {
serialized[i] = fmt.Sprintf("%q=%q", k, v)
i += 1
}
sort.SliceStable(serialized, func(i, j int) bool { return serialized[i] < serialized[j] })
return strings.Join(serialized, ",")
}
// ErrNotFound is returned when no bucket is found
// matching the given tags.
type ErrNotFound struct {
	tags tags // the tag set that produced no matches; echoed in the message.
}
// Error implements the error interface.
func (e *ErrNotFound) Error() string {
	return fmt.Sprintf("no S3 bucket found with tags %s", e.tags.String())
}
| 84 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package s3
import (
"errors"
"fmt"
"testing"
"github.com/aws/copilot-cli/internal/pkg/aws/resourcegroups"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/s3/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
// clientMocks bundles the generated mocks injected into Client under test.
type clientMocks struct {
	resourceGetter *mocks.MockresourceGetter
}
// TestClient_Service exercises Client.BucketName against a mocked Resource
// Groups client: API failure, zero matches, multiple matches, unparseable ARN,
// and the happy path.
// NOTE(review): the test name says "Service" but the method under test is BucketName.
func TestClient_Service(t *testing.T) {
	const (
		mockApp    = "mockApp"
		mockEnv    = "mockEnv"
		mockSvc    = "mockSvc"
		mockARN    = "arn:aws:s3:us-west-2:1234567890:myBucket"
		mockBadARN = "badARN"
	)
	mockError := errors.New("some error")
	// Expected tag filter derived from app/env/svc names.
	getRgInput := map[string]string{
		deploy.AppTagKey:     mockApp,
		deploy.EnvTagKey:     mockEnv,
		deploy.ServiceTagKey: mockSvc,
	}
	tests := map[string]struct {
		setupMocks  func(mocks clientMocks)
		wantedError error
		wanted      string
	}{
		"error if fail to get bucket resource": {
			setupMocks: func(m clientMocks) {
				gomock.InOrder(
					m.resourceGetter.EXPECT().GetResourcesByTags(bucketType, getRgInput).
						Return(nil, mockError),
				)
			},
			wantedError: fmt.Errorf(`get S3 bucket with tags "copilot-application"="mockApp","copilot-environment"="mockEnv","copilot-service"="mockSvc": some error`),
		},
		"error if got 0 bucket": {
			setupMocks: func(m clientMocks) {
				gomock.InOrder(
					m.resourceGetter.EXPECT().GetResourcesByTags(bucketType, getRgInput).
						Return([]*resourcegroups.Resource{}, nil),
				)
			},
			wantedError: fmt.Errorf(`no S3 bucket found with tags "copilot-application"="mockApp","copilot-environment"="mockEnv","copilot-service"="mockSvc"`),
		},
		"error if got more than 1 bucket": {
			setupMocks: func(m clientMocks) {
				gomock.InOrder(
					m.resourceGetter.EXPECT().GetResourcesByTags(bucketType, getRgInput).
						Return([]*resourcegroups.Resource{
							{}, {},
						}, nil),
				)
			},
			wantedError: fmt.Errorf(`more than one S3 bucket with tags "copilot-application"="mockApp","copilot-environment"="mockEnv","copilot-service"="mockSvc"`),
		},
		"fail to parse ARN": {
			setupMocks: func(m clientMocks) {
				gomock.InOrder(
					m.resourceGetter.EXPECT().GetResourcesByTags(bucketType, getRgInput).
						Return([]*resourcegroups.Resource{
							{ARN: mockBadARN},
						}, nil),
				)
			},
			wantedError: fmt.Errorf("parse ARN badARN: invalid S3 ARN: arn: invalid prefix"),
		},
		"success": {
			setupMocks: func(m clientMocks) {
				gomock.InOrder(
					m.resourceGetter.EXPECT().GetResourcesByTags(bucketType, getRgInput).
						Return([]*resourcegroups.Resource{
							{ARN: mockARN},
						}, nil),
				)
			},
			wanted: "myBucket",
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			// GIVEN
			mockRgGetter := mocks.NewMockresourceGetter(ctrl)
			mocks := clientMocks{
				resourceGetter: mockRgGetter,
			}
			test.setupMocks(mocks)
			client := Client{
				rgGetter: mockRgGetter,
			}
			// WHEN
			get, err := client.BucketName(mockApp, mockEnv, mockSvc)
			// THEN
			if test.wantedError != nil {
				require.EqualError(t, err, test.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, get, test.wanted)
			}
		})
	}
}
| 126 |
copilot-cli | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: ./internal/pkg/s3/s3.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
resourcegroups "github.com/aws/copilot-cli/internal/pkg/aws/resourcegroups"
gomock "github.com/golang/mock/gomock"
)
// MockresourceGetter is a mock of resourceGetter interface.
type MockresourceGetter struct {
ctrl *gomock.Controller
recorder *MockresourceGetterMockRecorder
}
// MockresourceGetterMockRecorder is the mock recorder for MockresourceGetter.
type MockresourceGetterMockRecorder struct {
mock *MockresourceGetter
}
// NewMockresourceGetter creates a new mock instance.
func NewMockresourceGetter(ctrl *gomock.Controller) *MockresourceGetter {
mock := &MockresourceGetter{ctrl: ctrl}
mock.recorder = &MockresourceGetterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockresourceGetter) EXPECT() *MockresourceGetterMockRecorder {
return m.recorder
}
// GetResourcesByTags mocks base method.
func (m *MockresourceGetter) GetResourcesByTags(resourceType string, tags map[string]string) ([]*resourcegroups.Resource, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetResourcesByTags", resourceType, tags)
ret0, _ := ret[0].([]*resourcegroups.Resource)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetResourcesByTags indicates an expected call of GetResourcesByTags.
func (mr *MockresourceGetterMockRecorder) GetResourcesByTags(resourceType, tags interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcesByTags", reflect.TypeOf((*MockresourceGetter)(nil).GetResourcesByTags), resourceType, tags)
}
| 51 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"fmt"
"math/rand"
"strings"
"sync"
"time"
awsarn "github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/cloudformation"
cfn "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
)
// StackEventsDescriber is the CloudFormation interface needed to describe stack events.
type StackEventsDescriber interface {
	DescribeStackEvents(*cloudformation.DescribeStackEventsInput) (*cloudformation.DescribeStackEventsOutput, error)
}
// StackEvent is a CloudFormation stack event.
// Fields mirror the SDK's StackEvent with pointers flattened to values.
type StackEvent struct {
	LogicalResourceID    string
	PhysicalResourceID   string
	ResourceType         string
	ResourceStatus       string
	ResourceStatusReason string
	Timestamp            time.Time
}
// clock abstracts time.Now so streamers can schedule fetches deterministically in tests.
type clock interface {
	now() time.Time
}
// realClock is the production clock backed by time.Now.
type realClock struct{}
func (c realClock) now() time.Time {
	return time.Now()
}
// fakeClock is a test double that always reports fakeNow.
type fakeClock struct{ fakeNow time.Time }
func (c fakeClock) now() time.Time {
	return c.fakeNow
}
// StackStreamer is a Streamer for StackEvent events started by a change set.
type StackStreamer struct {
	client                StackEventsDescriber // CloudFormation API used to poll events.
	clock                 clock                // overridden in tests for deterministic scheduling.
	rand                  func(int) int        // jitter source for fetch backoff; overridden in tests.
	stackID               string               // stack name or full ARN, as passed to the constructor.
	stackName             string               // stack name parsed from stackID.
	changeSetCreationTime time.Time            // events older than this are ignored.
	subscribers           []chan StackEvent    // channels receiving flushed events; guarded by mu.
	isDone                bool                 // set by Close; new subscribers then get closed channels. Guarded by mu.
	pastEventIDs          map[string]bool      // event IDs already seen, to dedupe across Fetch calls.
	eventsToFlush         []StackEvent         // events accumulated by Fetch, drained by Notify.
	mu                    sync.Mutex           // protects subscribers and isDone.
	retries               int                  // consecutive throttled fetches, drives backoff.
}
// NewStackStreamer creates a StackStreamer from a cloudformation client, stack name, and the change set creation timestamp.
// stackID may be either a plain stack name or a full stack ARN.
func NewStackStreamer(cfn StackEventsDescriber, stackID string, csCreationTime time.Time) *StackStreamer {
	return &StackStreamer{
		clock:                 realClock{},
		rand:                  rand.Intn,
		client:                cfn,
		stackID:               stackID,
		stackName:             stackARN(stackID).name(),
		changeSetCreationTime: csCreationTime,
		pastEventIDs:          make(map[string]bool),
	}
}
// Name returns the CloudFormation stack's name.
// This is the parsed name, even when the streamer was constructed with an ARN.
func (s *StackStreamer) Name() string {
	return s.stackName
}
// Region returns the region of the CloudFormation stack.
// If the region cannot be parsed from the input stack ID, then return "", false.
// A plain stack name (not an ARN) carries no region information.
func (s *StackStreamer) Region() (string, bool) {
	parsed, err := awsarn.Parse(s.stackID)
	if err == nil {
		return parsed.Region, true
	}
	return "", false
}
// Subscribe returns a read-only channel that will receive stack events from the StackStreamer.
// Subscribing to a streamer that has already finished yields an immediately-closed channel,
// so late subscribers never block waiting for events that will not come.
func (s *StackStreamer) Subscribe() <-chan StackEvent {
	s.mu.Lock()
	defer s.mu.Unlock()
	ch := make(chan StackEvent)
	s.subscribers = append(s.subscribers, ch)
	if s.isDone {
		close(ch)
	}
	return ch
}
// Fetch retrieves and stores any new CloudFormation stack events since the ChangeSetCreationTime in chronological order.
// If an error occurs from describe stack events, returns a wrapped error.
// Otherwise, returns the time the next Fetch should be attempted and whether or not there are more events to fetch.
func (s *StackStreamer) Fetch() (next time.Time, done bool, err error) {
	var events []StackEvent
	var nextToken *string
	for {
		// DescribeStackEvents returns events in reverse chronological order,
		// so we retrieve new events until we go past the ChangeSetCreationTime or we see an already seen event ID.
		// This logic is taken from the AWS CDK:
		// https://github.com/aws/aws-cdk/blob/43f3f09cc561fd32d651b2c327e877ad81c2ddb2/packages/aws-cdk/lib/api/util/cloudformation/stack-activity-monitor.ts#L230-L234
		out, err := s.client.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
			NextToken: nextToken,
			StackName: aws.String(s.stackID),
		})
		if err != nil {
			// Check for throttles and wait to try again using the StackStreamer's interval.
			// Throttling is not surfaced as an error; it only delays the next fetch.
			if request.IsErrorThrottle(err) {
				s.retries += 1
				return nextFetchDate(s.clock, s.rand, s.retries), false, nil
			}
			return next, false, fmt.Errorf("describe stack events %s: %w", s.stackID, err)
		}
		s.retries = 0
		var finished bool
		for _, event := range out.StackEvents {
			if event.Timestamp.Before(s.changeSetCreationTime) {
				finished = true
				break
			}
			if _, seen := s.pastEventIDs[aws.StringValue(event.EventId)]; seen {
				finished = true
				break
			}
			logicalID, resourceStatus := aws.StringValue(event.LogicalResourceId), aws.StringValue(event.ResourceStatus)
			// The stack itself reaching a non in-progress status means streaming is complete.
			if logicalID == s.stackName && !cfn.StackStatus(resourceStatus).InProgress() {
				done = true
			}
			events = append(events, StackEvent{
				LogicalResourceID:    logicalID,
				PhysicalResourceID:   aws.StringValue(event.PhysicalResourceId),
				ResourceType:         aws.StringValue(event.ResourceType),
				ResourceStatus:       resourceStatus,
				ResourceStatusReason: aws.StringValue(event.ResourceStatusReason),
				Timestamp:            aws.TimeValue(event.Timestamp),
			})
			s.pastEventIDs[aws.StringValue(event.EventId)] = true
		}
		if finished || out.NextToken == nil {
			break
		}
		nextToken = out.NextToken
	}
	// Store events to flush in chronological order.
	reverse(events)
	s.eventsToFlush = append(s.eventsToFlush, events...)
	return nextFetchDate(s.clock, s.rand, s.retries), done, nil
}
// Notify flushes all new events to the streamer's subscribers.
// Events are compressed so only the latest status per physical resource is sent.
func (s *StackStreamer) Notify() {
	// Snapshot the subscriber list so that new subscribers can be added
	// while previous subscribers are still being notified of older events.
	s.mu.Lock()
	subs := make([]chan StackEvent, len(s.subscribers))
	copy(subs, s.subscribers)
	s.mu.Unlock()

	for _, event := range s.compress(s.eventsToFlush) {
		for _, sub := range subs {
			sub <- event
		}
	}
	s.eventsToFlush = nil // Reset after flushing all events.
}
// Close closes all subscribed channels notifying them that no more events will be sent
// and causes the streamer to no longer accept any new subscribers.
func (s *StackStreamer) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, sub := range s.subscribers {
		close(sub)
	}
	// Subscribe checks this flag under the same lock and hands out closed channels afterwards.
	s.isDone = true
}
// compress retains only the last event for each unique resource physical IDs in a batch.
// The retained events keep their original (chronological) relative order.
func (s *StackStreamer) compress(batch []StackEvent) []StackEvent {
	// First pass: remember the index of the last occurrence of each resource.
	lastIdx := make(map[string]int, len(batch))
	for i, event := range batch {
		lastIdx[event.PhysicalResourceID] = i
	}
	// Second pass: keep only the events sitting at those last positions.
	var compressed []StackEvent
	for i, event := range batch {
		if lastIdx[event.PhysicalResourceID] == i {
			compressed = append(compressed, event)
		}
	}
	return compressed
}
// stackARN is a CloudFormation stack identifier: either a bare name or a full ARN.
type stackARN string

// name returns the name of a stack from its ARN.
// If the input isn't an ARN, then return the input as is.
// name assumes that if an ARN is passed, the format is valid.
func (arn stackARN) name() string {
	raw := string(arn)
	if !strings.HasPrefix(raw, "arn:") {
		return raw
	}
	parsed, err := awsarn.Parse(raw)
	if err != nil {
		return raw
	}
	// ARN resource is of the format: "stack/<stackName>/<uuid>".
	if parts := strings.SplitN(parsed.Resource, "/", 3); len(parts) == 3 {
		return parts[1]
	}
	return raw
}
// reverse flips arr in place so the first event becomes the last.
func reverse(arr []StackEvent) {
	for left, right := 0, len(arr)-1; left < right; left, right = left+1, right-1 {
		arr[left], arr[right] = arr[right], arr[left]
	}
}
| 247 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"fmt"
"math/rand"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation/stackset"
)
// StackSetDescriber is the CloudFormation interface needed to describe the health of a stack set operation.
type StackSetDescriber interface {
	InstanceSummaries(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error)
	DescribeOperation(name, opID string) (stackset.Operation, error)
}
// StackSetOpEvent represents a stack set operation status update message.
type StackSetOpEvent struct {
	Name      string // The name of the stack set.
	Operation stackset.Operation
}
// StackSetStreamer is a [Streamer] emitting [StackSetOpEvent] messages for instances under modification.
type StackSetStreamer struct {
	stackset    StackSetDescriber // CloudFormation stack set API.
	ssName      string            // name of the stack set being watched.
	opID        string            // ID of the operation being watched.
	opStartTime time.Time         // start of the operation; passed to child StackStreamers.
	subsMu      sync.Mutex        // protects subs and isDone.
	subs        []chan StackSetOpEvent
	isDone      bool               // set by Close; new subscribers then get closed channels.
	lastSentOp  stackset.Operation // last operation broadcast; used to dedupe notifications.
	curOp       stackset.Operation // most recently fetched operation.

	// Overridden in tests.
	clock                     clock
	rand                      func(int) int
	retries                   int
	instanceSummariesInterval time.Duration
}
// NewStackSetStreamer creates a StackSetStreamer for the given stack set name and operation.
func NewStackSetStreamer(cfn StackSetDescriber, ssName, opID string, opStartTime time.Time) *StackSetStreamer {
	return &StackSetStreamer{
		stackset:    cfn,
		ssName:      ssName,
		opID:        opID,
		opStartTime: opStartTime,
		clock:       realClock{},
		rand:        rand.Intn,
		// Polling interval for InstanceStreamers; empirically instances appear within this timeframe.
		instanceSummariesInterval: 250 * time.Millisecond,
	}
}
// Name returns the CloudFormation stack set's name.
func (s *StackSetStreamer) Name() string {
	return s.ssName
}
// InstanceStreamers initializes Streamers for each stack instance that's in progress part of the stack set.
// As long as the operation is in progress, [InstanceStreamers] will keep
// looking for at least one stack instance that's outdated and return only then.
// cfnClientFor must return a CloudFormation client scoped to the given region,
// since stack instances can live in different regions.
func (s *StackSetStreamer) InstanceStreamers(cfnClientFor func(region string) StackEventsDescriber) ([]*StackStreamer, error) {
	var streamers []*StackStreamer
	for {
		instances, err := s.stackset.InstanceSummaries(s.ssName)
		if err != nil {
			return nil, fmt.Errorf("describe in progress stack instances for stack set %q: %w", s.ssName, err)
		}
		for _, instance := range instances {
			if !instance.Status.InProgress() || instance.StackID == "" /* new instances won't immediately have an ID */ {
				continue
			}
			streamers = append(streamers, NewStackStreamer(cfnClientFor(instance.Region), instance.StackID, s.opStartTime))
		}
		if len(streamers) > 0 {
			break
		}
		// It's possible that instance statuses aren't updated immediately after a stack set operation is started.
		// If the operation is still ongoing, there must be at least one stack instance that's outdated.
		op, err := s.stackset.DescribeOperation(s.ssName, s.opID)
		if err != nil {
			return nil, fmt.Errorf("describe operation %q for stack set %q: %w", s.opID, s.ssName, err)
		}
		if !op.Status.InProgress() {
			// Operation finished before any instance showed up as in progress; nothing to stream.
			break
		}
		<-time.After(s.instanceSummariesInterval) // Empirically, instances appear within this timeframe.
	}
	return streamers, nil
}
// Subscribe returns a read-only channel to receive stack set operation events.
// If the streamer is already closed, the returned channel is closed too, so
// late subscribers never block waiting for events that will not come.
func (s *StackSetStreamer) Subscribe() <-chan StackSetOpEvent {
	// Hold the lock for the isDone check as well: Close writes isDone under
	// subsMu, so reading it unlocked here was a data race. This also mirrors
	// StackStreamer.Subscribe, which locks before checking its done flag.
	s.subsMu.Lock()
	defer s.subsMu.Unlock()
	c := make(chan StackSetOpEvent)
	if s.isDone {
		close(c)
		return c
	}
	s.subs = append(s.subs, c)
	return c
}
// Fetch retrieves and stores the latest CloudFormation stack set operation.
// If an error occurs from describing stack set operation, returns a wrapped error.
// Otherwise, returns the time the next Fetch should be attempted and whether or not there are more operations to fetch.
func (s *StackSetStreamer) Fetch() (next time.Time, done bool, err error) {
	op, err := s.stackset.DescribeOperation(s.ssName, s.opID)
	if err != nil {
		// Check for throttles and wait to try again using the StackSetStreamer's interval.
		// Throttling is not surfaced as an error; it only pushes back the next fetch.
		if request.IsErrorThrottle(err) {
			s.retries += 1
			return nextFetchDate(s.clock, s.rand, s.retries), false, nil
		}
		return next, false, fmt.Errorf("describe operation %q for stack set %q: %w", s.opID, s.ssName, err)
	}
	if op.Status.IsCompleted() {
		done = true
	}
	s.retries = 0
	s.curOp = op // Notify later publishes curOp if it differs from the last sent operation.
	return nextFetchDate(s.clock, s.rand, s.retries), done, nil
}
// Notify publishes the stack set's operation description to subscribers only
// if the content changed from the last time Notify was called.
func (s *StackSetStreamer) Notify() {
	// Skip the broadcast entirely when nothing changed since the last Notify.
	if s.lastSentOp == s.curOp {
		return
	}
	// Copy current list of subscribers over, so that we can add more subscribers while
	// notifying previous subscribers of operations.
	s.subsMu.Lock()
	subs := make([]chan StackSetOpEvent, len(s.subs))
	copy(subs, s.subs)
	s.subsMu.Unlock()
	for _, sub := range subs {
		sub <- StackSetOpEvent{
			Name:      s.ssName,
			Operation: s.curOp,
		}
	}
	s.lastSentOp = s.curOp
}
// Close closes all subscribed channels notifying them that no more events will be sent
// and causes the streamer to no longer accept any new subscribers.
func (s *StackSetStreamer) Close() {
	s.subsMu.Lock()
	defer s.subsMu.Unlock()
	for _, sub := range s.subs {
		close(sub)
	}
	// Subscribe consults this flag (under the same lock) and hands out closed channels afterwards.
	s.isDone = true
}
| 171 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"errors"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/stretchr/testify/require"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation/stackset"
)
// mockStackSetClient implements the StackSetDescriber interface.
// Each method delegates to a per-test function field so individual tests can
// script responses without a mocking framework.
type mockStackSetClient struct {
	instanceSummariesFn func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error)
	describeOpFn        func(name, opID string) (stackset.Operation, error)
}
func (m mockStackSetClient) InstanceSummaries(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
	return m.instanceSummariesFn(name, opts...)
}
func (m mockStackSetClient) DescribeOperation(name, opID string) (stackset.Operation, error) {
	return m.describeOpFn(name, opID)
}
// TestStackSetStreamer_InstanceStreamers covers error propagation, immediate
// return when instances are already in progress, and the retry loop that polls
// until in-progress instances appear.
func TestStackSetStreamer_InstanceStreamers(t *testing.T) {
	t.Run("should return a wrapped error when instance summaries cannot be found", func(t *testing.T) {
		// GIVEN
		mockStackSet := mockStackSetClient{
			instanceSummariesFn: func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
				return nil, errors.New("some error")
			},
		}
		mockStackLocator := func(_ string) StackEventsDescriber {
			return mockStackClient{}
		}
		streamer := NewStackSetStreamer(mockStackSet, "demo-infrastructure", "1", time.Now())
		// WHEN
		_, err := streamer.InstanceStreamers(mockStackLocator)
		// THEN
		require.EqualError(t, err, `describe in progress stack instances for stack set "demo-infrastructure": some error`)
	})
	t.Run("should return immediately if there are stack instances in progress", func(t *testing.T) {
		// GIVEN
		mockStackSet := mockStackSetClient{
			instanceSummariesFn: func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
				return []stackset.InstanceSummary{
					{
						StackID: "1111",
						Region:  "us-west-2",
						Status:  "RUNNING",
					},
					{
						StackID: "2222",
						Region:  "us-east-1",
						Status:  "RUNNING",
					},
				}, nil
			},
		}
		regionalStreamers := make(map[string]int)
		mockStackLocator := func(region string) StackEventsDescriber {
			regionalStreamers[region] += 1
			return mockStackClient{}
		}
		streamer := NewStackSetStreamer(mockStackSet, "demo-infrastructure", "1", time.Now())
		// WHEN
		children, err := streamer.InstanceStreamers(mockStackLocator)
		// THEN
		require.NoError(t, err)
		require.Equal(t, 2, len(regionalStreamers), "expected a separate streamer for each region")
		require.Equal(t, 2, len(children), "expected as many streamers as instance summaries")
	})
	t.Run("should return a wrapped error when describing the operation fails", func(t *testing.T) {
		// GIVEN
		// All instances are already SUCCEEDED, so the streamer falls back to DescribeOperation.
		mockStackSet := mockStackSetClient{
			instanceSummariesFn: func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
				return []stackset.InstanceSummary{
					{
						StackID: "1111",
						Region:  "us-west-2",
						Status:  "SUCCEEDED",
					},
				}, nil
			},
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				return stackset.Operation{}, errors.New("some error")
			},
		}
		regionalStreamers := make(map[string]int)
		mockStackLocator := func(region string) StackEventsDescriber {
			regionalStreamers[region] += 1
			return mockStackClient{}
		}
		streamer := NewStackSetStreamer(mockStackSet, "demo-infrastructure", "1", time.Now())
		// WHEN
		_, err := streamer.InstanceStreamers(mockStackLocator)
		// THEN
		require.EqualError(t, err, `describe operation "1" for stack set "demo-infrastructure": some error`)
	})
	t.Run("should keep calling InstanceSummary until in progress instances are found", func(t *testing.T) {
		// GIVEN
		// First call returns no in-progress instances; the second call does.
		var callCount int
		mockStackSet := mockStackSetClient{
			instanceSummariesFn: func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
				defer func() { callCount += 1 }()
				if callCount == 0 {
					return []stackset.InstanceSummary{
						{
							StackID: "1111",
							Region:  "us-west-2",
							Status:  "SUCCEEDED",
						},
					}, nil
				}
				return []stackset.InstanceSummary{
					{
						StackID: "1111",
						Region:  "us-west-2",
						Status:  "SUCCEEDED",
					},
					{
						StackID: "2222",
						Region:  "us-east-1",
						Status:  "RUNNING",
					},
				}, nil
			},
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				return stackset.Operation{
					Status: "RUNNING",
				}, nil
			},
		}
		regionalStreamers := make(map[string]int)
		mockStackLocator := func(region string) StackEventsDescriber {
			regionalStreamers[region] += 1
			return mockStackClient{}
		}
		streamer := NewStackSetStreamer(mockStackSet, "demo-infrastructure", "1", time.Now())
		streamer.instanceSummariesInterval = 0 // override time to wait interval.
		// WHEN
		children, err := streamer.InstanceStreamers(mockStackLocator)
		// THEN
		require.NoError(t, err)
		require.Equal(t, 1, len(regionalStreamers), "expected a separate streamer for each region")
		require.Equal(t, 1, len(children), "expected as many streamers as instance summaries")
	})
}
// TestStackSetStreamer_Subscribe verifies the late-subscriber contract:
// subscribing after Close yields an already-closed channel.
func TestStackSetStreamer_Subscribe(t *testing.T) {
	t.Run("subscribing to a closed streamer should return a closed channel", func(t *testing.T) {
		// GIVEN
		client := mockStackSetClient{
			instanceSummariesFn: func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
				return nil, nil
			},
		}
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", time.Now())
		streamer.Close()
		// WHEN
		ch := streamer.Subscribe()
		// THEN
		_, more := <-ch
		require.False(t, more, "there should not be any more messages to send in the channel")
	})
}
// TestStackSetStreamer_Close verifies Close terminates every subscriber channel.
func TestStackSetStreamer_Close(t *testing.T) {
	t.Run("should close all subscribed channels", func(t *testing.T) {
		// GIVEN
		client := mockStackSetClient{
			instanceSummariesFn: func(name string, opts ...stackset.InstanceSummariesOption) ([]stackset.InstanceSummary, error) {
				return nil, nil
			},
		}
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", time.Now())
		first := streamer.Subscribe()
		second := streamer.Subscribe()
		// WHEN
		streamer.Close()
		// THEN
		_, more := <-first
		require.False(t, more, "there should not be any more messages to send in the first channel")
		_, more = <-second
		require.False(t, more, "there should not be any more messages to send in the second channel")
	})
}
// TestStackSetStreamer_Fetch covers throttling backoff, error wrapping, and
// the normal next-fetch scheduling. clock and rand are stubbed so the expected
// next-fetch timestamps are deterministic.
func TestStackSetStreamer_Fetch(t *testing.T) {
	t.Run("Fetch should return a later timestamp if a throttling error occurs", func(t *testing.T) {
		// GIVEN
		client := &mockStackSetClient{
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				return stackset.Operation{}, awserr.New("RequestThrottled", "throttle err", errors.New("abc"))
			},
		}
		startTime := time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC)
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", startTime)
		streamer.clock = fakeClock{fakeNow: startTime}
		streamer.rand = func(n int) int { return n }
		// One throttle retry doubles the base interval.
		wantedTime := startTime.Add(2 * streamerFetchIntervalDurationMs * time.Millisecond)
		// WHEN
		next, _, err := streamer.Fetch()
		// THEN
		require.NoError(t, err)
		require.Equal(t, wantedTime, next)
	})
	t.Run("Fetch should return an error when the operation cannot be described", func(t *testing.T) {
		// GIVEN
		client := &mockStackSetClient{
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				return stackset.Operation{}, errors.New("some error")
			},
		}
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", time.Now())
		// WHEN
		_, _, err := streamer.Fetch()
		// THEN
		require.EqualError(t, err, `describe operation "1" for stack set "demo-infrastructure": some error`)
	})
	t.Run("Fetch should return the next immediate date on success", func(t *testing.T) {
		client := &mockStackSetClient{
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				return stackset.Operation{Status: "hello"}, nil
			},
		}
		startTime := time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC)
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", startTime)
		streamer.clock = fakeClock{fakeNow: startTime}
		streamer.rand = func(n int) int { return n }
		// WHEN
		next, _, err := streamer.Fetch()
		// THEN
		require.NoError(t, err)
		require.Equal(t, startTime.Add(streamerFetchIntervalDurationMs*time.Millisecond), next)
	})
}
// TestStackSetStreamer_Integration drives Fetch/Notify/Close together:
// done is reported for terminal statuses, and duplicate operation states are
// deduplicated before reaching subscribers.
func TestStackSetStreamer_Integration(t *testing.T) {
	t.Run("Done if Fetch retrieved a final status", func(t *testing.T) {
		// GIVEN
		client := &mockStackSetClient{
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				return stackset.Operation{
					ID:     "1",
					Status: "SUCCEEDED",
				}, nil
			},
		}
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", time.Now())
		// WHEN
		_, done, err := streamer.Fetch()
		// THEN
		require.NoError(t, err)
		require.True(t, done)
	})
	t.Run("should only broadcast unique operations to subscribers", func(t *testing.T) {
		// GIVEN
		// Five fetched states, two of which are identical; only four events should be delivered.
		var callCount int
		responses := [5]stackset.Operation{
			{ID: "1", Status: "QUEUED"},
			{ID: "1", Status: "QUEUED"},
			{ID: "1", Status: "RUNNING"},
			{ID: "1", Status: "STOPPING"},
			{ID: "1", Status: "STOPPED", Reason: "manually stopped"},
		}
		wanted := [4]StackSetOpEvent{
			{Name: "demo-infrastructure", Operation: stackset.Operation{ID: "1", Status: "QUEUED"}},
			{Name: "demo-infrastructure", Operation: stackset.Operation{ID: "1", Status: "RUNNING"}},
			{Name: "demo-infrastructure", Operation: stackset.Operation{ID: "1", Status: "STOPPING"}},
			{Name: "demo-infrastructure", Operation: stackset.Operation{ID: "1", Status: "STOPPED", Reason: "manually stopped"}},
		}
		client := &mockStackSetClient{
			describeOpFn: func(_, _ string) (stackset.Operation, error) {
				defer func() { callCount += 1 }()
				return responses[callCount], nil
			},
		}
		streamer := NewStackSetStreamer(client, "demo-infrastructure", "1", time.Now())
		sub := streamer.Subscribe()
		// WHEN
		go func() {
			for i := 0; i < 5; i += 1 {
				_, _, err := streamer.Fetch()
				require.NoError(t, err, "fetch %d should succeed", i)
				streamer.Notify()
			}
			streamer.Close()
		}()
		done := make(chan struct{})
		var actual []StackSetOpEvent
		go func() {
			for msg := range sub {
				actual = append(actual, msg)
			}
			close(done)
		}()
		// THEN
		<-done
		require.ElementsMatch(t, wanted, actual)
	})
}
| 334 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"errors"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/stretchr/testify/require"
)
// mockStackClient is a canned-response implementation of StackEventsDescriber:
// every DescribeStackEvents call returns the same fixed output and error.
type mockStackClient struct {
	out *cloudformation.DescribeStackEventsOutput
	err error
}
func (m mockStackClient) DescribeStackEvents(*cloudformation.DescribeStackEventsInput) (*cloudformation.DescribeStackEventsOutput, error) {
	return m.out, m.err
}
// TestStackStreamer_Region checks that the streamer can extract the AWS region
// from its stack ID, and that extraction only succeeds when the ID is a full ARN.
func TestStackStreamer_Region(t *testing.T) {
	testCases := map[string]struct {
		stackID      string
		wantedRegion string
		wantedOK     bool
	}{
		"should return false when the stack id isn't an ARN": {
			// Plain stack names carry no region information.
			stackID:      "StackSet-demo-infrastructure-7382d3ee-6823-4967-9bcf-8a9118259998",
			wantedRegion: "",
			wantedOK:     false,
		},
		"should return the region when the stack id is an ARN": {
			// The region is the fourth colon-separated ARN component.
			stackID:      "arn:aws:cloudformation:ap-northeast-1:1111:stack/StackSet-demo-infrastructure-7382d3ee-6823-4967-9bcf-8a9118259998/23f0ecb0-1d7e-11ed-af45-06a7c29c9545",
			wantedRegion: "ap-northeast-1",
			wantedOK:     true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN: a streamer; the client is unused by Region so nil is fine.
			streamer := NewStackStreamer(nil, tc.stackID, time.Now())

			// WHEN
			region, ok := streamer.Region()

			// THEN
			require.Equal(t, tc.wantedOK, ok)
			require.Equal(t, tc.wantedRegion, region)
		})
	}
}
// TestStackStreamer_Subscribe verifies the two subscription behaviors: active
// streamers register new channels, finished streamers hand back closed ones.
func TestStackStreamer_Subscribe(t *testing.T) {
	t.Run("allow new subscriptions if stack streamer is still active", func(t *testing.T) {
		// GIVEN
		streamer := &StackStreamer{}

		// WHEN: subscribe twice.
		_ = streamer.Subscribe()
		_ = streamer.Subscribe()

		// THEN
		require.Equal(t, 2, len(streamer.subscribers), "expected number of subscribers to match")
	})

	t.Run("new subscriptions on a finished stack streamer should return closed channels", func(t *testing.T) {
		// GIVEN: a streamer that has already closed.
		streamer := &StackStreamer{isDone: true}

		// WHEN
		ch := streamer.Subscribe()
		_, ok := <-ch // a receive on a closed channel reports ok == false.

		// THEN
		require.False(t, ok, "channel should be closed")
	})
}
// TestStackStreamer_Fetch dispatches to the individual Fetch scenarios below;
// each sub-test owns its own fixture.
func TestStackStreamer_Fetch(t *testing.T) {
	t.Run("stores all events in chronological order on fetch and closes done when the stack is no longer in progress", testStackStreamer_Fetch_Success)
	t.Run("stores only events after the changeset creation time", testStackStreamer_Fetch_PostChangeSet)
	t.Run("stores only events that have not been seen yet", testStackStreamer_Fetch_WithSeenEvents)
	t.Run("returns wrapped error if describe call fails", testStackStreamer_Fetch_WithError)
	t.Run("throttle results in a gracefully handled error and exponential backoff", testStackStreamer_Fetch_withThrottle)
}
// TestStackStreamer_Notify checks that Notify forwards every buffered event to
// each subscribed channel.
func TestStackStreamer_Notify(t *testing.T) {
	// GIVEN: two pending events and one subscriber with enough buffer for both.
	wantedEvents := []StackEvent{
		{
			LogicalResourceID:  "Cluster",
			PhysicalResourceID: "Cluster1",
			ResourceType:       "AWS::ECS::Cluster",
			ResourceStatus:     "CREATE_COMPLETE",
		},
		{
			LogicalResourceID:  "PublicLoadBalancer",
			PhysicalResourceID: "ELB1",
			ResourceType:       "AWS::ElasticLoadBalancingV2::LoadBalancer",
			ResourceStatus:     "CREATE_COMPLETE",
		},
	}
	sub := make(chan StackEvent, 2)
	streamer := &StackStreamer{
		subscribers:   []chan StackEvent{sub},
		eventsToFlush: wantedEvents,
	}

	// WHEN
	streamer.Notify()
	close(sub) // Close the channel to stop expecting to receive new events.

	// THEN: drain the channel and compare contents.
	var actualEvents []StackEvent
	for event := range sub {
		actualEvents = append(actualEvents, event)
	}
	require.ElementsMatch(t, wantedEvents, actualEvents)
}
// testStackStreamer_Fetch_Success checks that Fetch reverses the API's
// reverse-chronological event list into chronological order, and reports
// done once the stack itself reaches a terminal status.
func testStackStreamer_Fetch_Success(t *testing.T) {
	// GIVEN
	startTime := time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC)
	client := mockStackClient{
		// Events are in reverse chronological order.
		out: &cloudformation.DescribeStackEventsOutput{
			StackEvents: []*cloudformation.StackEvent{
				{
					EventId:            aws.String("5"),
					LogicalResourceId:  aws.String("phonetool-test"),
					PhysicalResourceId: aws.String("phonetool-test"),
					ResourceStatus:     aws.String("CREATE_COMPLETE"),
					Timestamp:          aws.Time(startTime.Add(time.Hour)),
				},
				{
					EventId:              aws.String("4"),
					LogicalResourceId:    aws.String("CloudformationExecutionRole"),
					PhysicalResourceId:   aws.String("CloudformationExecutionRole-123a"),
					ResourceStatus:       aws.String("CREATE_FAILED"),
					ResourceStatusReason: aws.String("phonetool-test-CFNExecutionRole already exists"),
					Timestamp:            aws.Time(startTime.Add(time.Hour + 30*time.Minute)),
				},
				{
					EventId:            aws.String("3"),
					LogicalResourceId:  aws.String("Cluster"),
					PhysicalResourceId: aws.String("Cluster-6574"),
					ResourceStatus:     aws.String("CREATE_COMPLETE"),
					Timestamp:          aws.Time(startTime.Add(2 * time.Hour)),
				},
				{
					EventId:            aws.String("2"),
					LogicalResourceId:  aws.String("Cluster"),
					PhysicalResourceId: aws.String("Cluster-6574"),
					ResourceStatus:     aws.String("CREATE_IN_PROGRESS"),
					Timestamp:          aws.Time(startTime.Add(3 * time.Hour)),
				},
				{
					EventId:            aws.String("1"),
					LogicalResourceId:  aws.String("PublicLoadBalancer"),
					PhysicalResourceId: aws.String("PublicLoadBalancer-2139"),
					ResourceStatus:     aws.String("CREATE_COMPLETE"),
					Timestamp:          aws.Time(startTime.Add(4 * time.Hour)),
				},
			},
		},
	}
	streamer := NewStackStreamer(client, "arn:aws:cloudformation:us-west-2:111111:stack/phonetool-test/b3184400-1429-11ed-a574-0a587ce78f9b", startTime)

	// WHEN
	beforeFetch := time.Now()
	expected, done, err := streamer.Fetch()

	// THEN: the next fetch date respects at least the minimum interval.
	require.NoError(t, err)
	require.False(t, beforeFetch.Add(streamerMinFetchIntervalDurationMs*time.Millisecond).After(expected))
	// compress dedupes per-resource events; the flushed order is the reverse of the API response.
	require.Equal(t, []StackEvent{
		{
			LogicalResourceID:  "PublicLoadBalancer",
			PhysicalResourceID: "PublicLoadBalancer-2139",
			ResourceStatus:     "CREATE_COMPLETE",
			Timestamp:          startTime.Add(4 * time.Hour),
		},
		{
			LogicalResourceID:  "Cluster",
			PhysicalResourceID: "Cluster-6574",
			ResourceStatus:     "CREATE_COMPLETE",
			Timestamp:          startTime.Add(2 * time.Hour),
		},
		{
			LogicalResourceID:    "CloudformationExecutionRole",
			PhysicalResourceID:   "CloudformationExecutionRole-123a",
			ResourceStatus:       "CREATE_FAILED",
			ResourceStatusReason: "phonetool-test-CFNExecutionRole already exists",
			Timestamp:            startTime.Add(time.Hour + 30*time.Minute),
		},
		{
			LogicalResourceID:  "phonetool-test",
			PhysicalResourceID: "phonetool-test",
			ResourceStatus:     "CREATE_COMPLETE",
			Timestamp:          startTime.Add(time.Hour),
		},
	}, streamer.compress(streamer.eventsToFlush), "expected eventsToFlush to appear in chronological order")
	require.True(t, done, "there should be no more work to do since the stack is created")
}
// testStackStreamer_Fetch_PostChangeSet checks that events which predate the
// changeset creation time are discarded instead of flushed.
func testStackStreamer_Fetch_PostChangeSet(t *testing.T) {
	// GIVEN
	client := mockStackClient{
		out: &cloudformation.DescribeStackEventsOutput{
			StackEvents: []*cloudformation.StackEvent{
				{
					EventId:           aws.String("abc"),
					LogicalResourceId: aws.String("Cluster"),
					ResourceStatus:    aws.String("CREATE_COMPLETE"),
					Timestamp:         aws.Time(time.Date(2020, time.November, 23, 18, 0, 0, 0, time.UTC)),
				},
			},
		},
	}
	streamer := &StackStreamer{
		client:                client,
		clock:                 fakeClock{fakeNow: time.Now()},
		rand:                  func(n int) int { return n },
		stackID:               "phonetool-test",
		stackName:             "phonetool-test",
		changeSetCreationTime: time.Date(2020, time.November, 23, 19, 0, 0, 0, time.UTC), // An hour after the last event.
	}

	// WHEN
	_, _, err := streamer.Fetch()

	// THEN: the stale event is dropped.
	require.NoError(t, err)
	require.Empty(t, streamer.eventsToFlush, "expected eventsToFlush to be empty")
}
// testStackStreamer_Fetch_WithSeenEvents checks that events whose IDs were
// already recorded in pastEventIDs are not flushed a second time.
func testStackStreamer_Fetch_WithSeenEvents(t *testing.T) {
	// GIVEN
	startTime := time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC)
	client := mockStackClient{
		out: &cloudformation.DescribeStackEventsOutput{
			StackEvents: []*cloudformation.StackEvent{
				{
					EventId:           aws.String("abc"),
					LogicalResourceId: aws.String("Cluster"),
					ResourceStatus:    aws.String("CREATE_COMPLETE"),
					Timestamp:         aws.Time(startTime.Add(2 * time.Hour)),
				},
				{
					EventId:           aws.String("def"),
					LogicalResourceId: aws.String("PublicLoadBalancer"),
					ResourceStatus:    aws.String("CREATE_COMPLETE"),
					Timestamp:         aws.Time(startTime.Add(time.Hour)),
				},
			},
		},
	}
	streamer := &StackStreamer{
		client:                client,
		clock:                 fakeClock{fakeNow: time.Now()},
		rand:                  func(n int) int { return n },
		stackID:               "phonetool-test",
		stackName:             "phonetool-test",
		changeSetCreationTime: startTime,
		// "def" was already delivered by a previous Fetch.
		pastEventIDs: map[string]bool{
			"def": true,
		},
	}

	// WHEN
	_, _, err := streamer.Fetch()

	// THEN: only the unseen "abc" event is flushed.
	require.NoError(t, err)
	require.ElementsMatch(t, []StackEvent{
		{
			LogicalResourceID: "Cluster",
			ResourceStatus:    "CREATE_COMPLETE",
			Timestamp:         startTime.Add(2 * time.Hour),
		},
	}, streamer.eventsToFlush, "expected only the event not seen yet to be flushed")
}
// testStackStreamer_Fetch_WithError checks that a non-throttle API failure is
// surfaced to the caller wrapped with the stack name.
func testStackStreamer_Fetch_WithError(t *testing.T) {
	// GIVEN
	client := mockStackClient{
		err: errors.New("some error"),
	}
	streamer := &StackStreamer{
		client:                client,
		clock:                 fakeClock{fakeNow: time.Now()},
		rand:                  func(n int) int { return n },
		stackID:               "phonetool-test",
		stackName:             "phonetool-test",
		changeSetCreationTime: time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC),
	}

	// WHEN
	_, _, err := streamer.Fetch()

	// THEN
	require.EqualError(t, err, "describe stack events phonetool-test: some error")
}
// testStackStreamer_Fetch_withThrottle checks that a throttling error is not
// returned to the caller: Fetch instead schedules a backed-off retry and bumps
// the retry counter.
func testStackStreamer_Fetch_withThrottle(t *testing.T) {
	// GIVEN
	client := &mockStackClient{
		err: awserr.New("RequestThrottled", "throttle err", errors.New("abc")),
	}
	streamer := &StackStreamer{
		client:                *client,
		clock:                 fakeClock{fakeNow: time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC)},
		rand:                  func(n int) int { return n }, // deterministic "jitter": always the upper bound.
		stackID:               "phonetool-test",
		stackName:             "phonetool-test",
		changeSetCreationTime: time.Date(2020, time.November, 23, 16, 0, 0, 0, time.UTC),
		pastEventIDs:          map[string]bool{},
		retries:               0,
	}
	// WHEN
	nextDate, _, err := streamer.Fetch()
	maxDuration := 2 * streamerFetchIntervalDurationMs * time.Millisecond
	require.NoError(t, err, "expect no results and no error for throttle exception")
	// NOTE(review): the message talks about "less than the maximum" but the
	// assertion pins an exact date (fake clock + deterministic rand): +8s.
	require.Equal(t, nextDate, time.Date(2020, time.November, 23, 16, 0, 8, 0, time.UTC), "expect that the returned timeout (%s) is less than the maximum for backoff (%d)", time.Until(nextDate), maxDuration)
	require.Equal(t, 1, streamer.retries)
}
// TestStackStreamer_Close checks that Close closes every subscriber channel
// and flags the streamer as done so later Subscribe calls get closed channels.
func TestStackStreamer_Close(t *testing.T) {
	// GIVEN
	streamer := &StackStreamer{}
	c := streamer.Subscribe()

	// WHEN
	streamer.Close()

	// THEN
	_, isOpen := <-c
	require.False(t, isOpen, "expected subscribed channels to be closed")
	require.True(t, streamer.isDone, "should mark the streamer that it won't allow new subscribers")
}
| 348 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"fmt"
"math/rand"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudwatch"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
)
const (
	// ECS service deployment constants.
	ecsPrimaryDeploymentStatus = "PRIMARY"   // status of the deployment being rolled out.
	rollOutCompleted           = "COMPLETED" // terminal rollout state: success.
	rollOutFailed              = "FAILED"    // terminal rollout state: failure.
	rollOutEmpty               = ""          // rollout state is unset (e.g. circuit breaker disabled).
)

// ecsEventFailureKeywords are the substrings that mark an ECS service event
// message as a failure worth surfacing to the user (case-sensitive match).
var ecsEventFailureKeywords = []string{"fail", "unhealthy", "error", "throttle", "unable", "missing", "alarm detected", "rolling back"}
// ECSServiceDescriber is the interface to describe an ECS service.
type ECSServiceDescriber interface {
	// Service returns the description of the named service in the given cluster.
	Service(clusterName, serviceName string) (*ecs.Service, error)
}

// CloudWatchDescriber is the interface to describe CW alarms.
type CloudWatchDescriber interface {
	// AlarmStatuses returns the statuses of the alarms selected by opts.
	AlarmStatuses(opts ...cloudwatch.DescribeAlarmOpts) ([]cloudwatch.AlarmStatus, error)
}
// ECSDeployment represent an ECS rolling update deployment.
type ECSDeployment struct {
	Status          string // "PRIMARY" for the deployment being rolled out; see isPrimary.
	TaskDefRevision string // revision number parsed from the task definition ARN.
	DesiredCount    int    // number of tasks the deployment should run.
	RunningCount    int    // number of tasks currently running.
	FailedCount     int    // number of tasks that failed to launch.
	PendingCount    int    // number of tasks still pending.
	RolloutState    string // "COMPLETED", "FAILED", or "" when unset; see done.
	CreatedAt       time.Time
	UpdatedAt       time.Time
}
// isPrimary reports whether this is the service's PRIMARY deployment,
// i.e. the one currently being rolled out.
func (d ECSDeployment) isPrimary() bool {
	switch d.Status {
	case ecsPrimaryDeploymentStatus:
		return true
	default:
		return false
	}
}
// done reports whether the deployment has reached a terminal condition:
// a failed rollout is always done, while a completed (or unset) rollout is
// done once every desired task is running. Any other state is still in flight.
func (d ECSDeployment) done() bool {
	if d.RolloutState == rollOutFailed {
		return true
	}
	if d.RolloutState == rollOutCompleted || d.RolloutState == rollOutEmpty {
		return d.DesiredCount == d.RunningCount
	}
	return false
}
// ECSService is a description of an ECS service.
type ECSService struct {
	Deployments         []ECSDeployment          // all deployments for the service (primary + active).
	LatestFailureEvents []string                 // failure event messages observed since the deployment started.
	Alarms              []cloudwatch.AlarmStatus // rollback-alarm statuses, only populated when alarms are enabled.
}
// ECSDeploymentStreamer is a Streamer for ECSService descriptions until the deployment is completed.
type ECSDeploymentStreamer struct {
	client ECSServiceDescriber // API used to describe the service.
	cw     CloudWatchDescriber // API used to fetch rollback-alarm statuses.
	clock  clock               // injectable time source for testability.

	cluster                string           // cluster the service runs in.
	rand                   func(n int) int  // injectable jitter source for backoff.
	service                string           // name of the service being deployed.
	deploymentCreationTime time.Time        // events older than this are ignored.

	subscribers   []chan ECSService // channels to publish new descriptions to; guarded by mu.
	isDone        bool              // set by Close; new subscribers then get closed channels.
	pastEventIDs  map[string]bool   // service event IDs already processed, to avoid re-reporting.
	eventsToFlush []ECSService      // descriptions accumulated by Fetch, drained by Notify.
	mu            sync.Mutex        // protects subscribers and isDone.

	ecsRetries int // consecutive ECS throttles, drives exponential backoff.
	cwRetries  int // consecutive CloudWatch throttles, drives exponential backoff.
}
// NewECSDeploymentStreamer creates a new ECSDeploymentStreamer that streams service descriptions
// since the deployment creation time and until the primary deployment is completed.
func NewECSDeploymentStreamer(ecs ECSServiceDescriber, cw CloudWatchDescriber, cluster, service string, deploymentCreationTime time.Time) *ECSDeploymentStreamer {
	streamer := &ECSDeploymentStreamer{
		client: ecs,
		cw:     cw,
		// Production defaults: wall clock and math/rand jitter; tests inject fakes.
		clock:                  realClock{},
		rand:                   rand.Intn,
		cluster:                cluster,
		service:                service,
		deploymentCreationTime: deploymentCreationTime,
		pastEventIDs:           map[string]bool{},
	}
	return streamer
}
// Subscribe returns a read-only channel that will receive service descriptions from the ECSDeploymentStreamer.
// If the streamer has already finished, the returned channel is closed immediately.
func (s *ECSDeploymentStreamer) Subscribe() <-chan ECSService {
	s.mu.Lock()
	defer s.mu.Unlock()

	ch := make(chan ECSService)
	s.subscribers = append(s.subscribers, ch)
	if s.isDone {
		// Streaming is over; a closed channel makes the subscriber's
		// range loop exit right away.
		close(ch)
	}
	return ch
}
// Fetch retrieves and stores ECSService descriptions since the deployment's creation time
// until the primary deployment's running count is equal to its desired count.
// If an error occurs from describe service, returns a wrapped err.
// Otherwise, returns the time the next Fetch should be attempted.
// Throttling errors are absorbed: the retry counter is bumped and a backed-off
// next fetch date is returned instead of an error.
func (s *ECSDeploymentStreamer) Fetch() (next time.Time, done bool, err error) {
	out, err := s.client.Service(s.cluster, s.service)
	if err != nil {
		if request.IsErrorThrottle(err) {
			// Back off exponentially instead of failing the stream.
			s.ecsRetries += 1
			return nextFetchDate(s.clock, s.rand, s.ecsRetries), false, nil
		}
		return next, false, fmt.Errorf("fetch service description: %w", err)
	}
	s.ecsRetries = 0 // a successful call resets the backoff.
	// Convert every API deployment into our ECSDeployment representation,
	// and mark the stream done once the primary deployment finishes.
	var deployments []ECSDeployment
	for _, deployment := range out.Deployments {
		status := aws.StringValue(deployment.Status)
		desiredCount, runningCount := aws.Int64Value(deployment.DesiredCount), aws.Int64Value(deployment.RunningCount)
		rollingDeploy := ECSDeployment{
			Status:          status,
			TaskDefRevision: parseRevisionFromTaskDefARN(aws.StringValue(deployment.TaskDefinition)),
			DesiredCount:    int(desiredCount),
			RunningCount:    int(runningCount),
			FailedCount:     int(aws.Int64Value(deployment.FailedTasks)),
			PendingCount:    int(aws.Int64Value(deployment.PendingCount)),
			RolloutState:    aws.StringValue(deployment.RolloutState),
			CreatedAt:       aws.TimeValue(deployment.CreatedAt),
			UpdatedAt:       aws.TimeValue(deployment.UpdatedAt),
		}
		deployments = append(deployments, rollingDeploy)
		if isDeploymentDone(rollingDeploy, s.deploymentCreationTime) {
			done = true
		}
	}
	// Collect new failure messages. NOTE(review): the breaks assume the API
	// returns events newest-first, so the first stale/seen event means the
	// rest are stale/seen too — confirm against the ECS API ordering.
	var failureMsgs []string
	for _, event := range out.Events {
		if createdAt := aws.TimeValue(event.CreatedAt); createdAt.Before(s.deploymentCreationTime) {
			break
		}
		id := aws.StringValue(event.Id)
		if _, ok := s.pastEventIDs[id]; ok {
			break
		}
		if msg := aws.StringValue(event.Message); isFailureServiceEvent(msg) {
			failureMsgs = append(failureMsgs, msg)
		}
		s.pastEventIDs[id] = true
	}
	// Only query CloudWatch when rollback alarms are configured and enabled.
	var alarms []cloudwatch.AlarmStatus
	if out.DeploymentConfiguration != nil && out.DeploymentConfiguration.Alarms != nil && aws.BoolValue(out.DeploymentConfiguration.Alarms.Enable) {
		alarmNames := aws.StringValueSlice(out.DeploymentConfiguration.Alarms.AlarmNames)
		alarms, err = s.cw.AlarmStatuses(cloudwatch.WithNames(alarmNames))
		if err != nil {
			if request.IsErrorThrottle(err) {
				s.cwRetries += 1
				return nextFetchDate(s.clock, s.rand, s.cwRetries), false, nil
			}
			return next, false, fmt.Errorf("retrieve alarm statuses: %w", err)
		}
		s.cwRetries = 0
	}
	// Buffer the description; Notify will deliver it to subscribers.
	s.eventsToFlush = append(s.eventsToFlush, ECSService{
		Deployments:         deployments,
		LatestFailureEvents: failureMsgs,
		Alarms:              alarms,
	})
	return nextFetchDate(s.clock, s.rand, 0), done, nil
}
// Notify flushes all new events to the streamer's subscribers.
func (s *ECSDeploymentStreamer) Notify() {
	// Snapshot the subscriber list under the lock so that Subscribe can keep
	// adding channels while older events are being delivered.
	s.mu.Lock()
	subs := make([]chan ECSService, len(s.subscribers))
	copy(subs, s.subscribers)
	s.mu.Unlock()

	// Deliver each event to every subscriber, in event order.
	for _, event := range s.eventsToFlush {
		for _, sub := range subs {
			sub <- event
		}
	}
	s.eventsToFlush = nil // reset after flushing all events.
}
// Close closes all subscribed channels notifying them that no more events will be sent,
// and marks the streamer done so future Subscribe calls return closed channels.
func (s *ECSDeploymentStreamer) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()

	for i := range s.subscribers {
		close(s.subscribers[i])
	}
	s.isDone = true
}
// parseRevisionFromTaskDefARN returns the revision number as string given the ARN of a task definition.
// For example, given the input "arn:aws:ecs:us-west-2:1111:task-definition/webapp-test-frontend:3"
// the output is "3".
// If the ARN does not contain a "/" resource separator or a ":" revision
// separator, it returns "" instead of panicking with an index-out-of-range
// (the previous behavior on malformed input).
func parseRevisionFromTaskDefARN(arn string) string {
	segments := strings.Split(arn, "/")
	if len(segments) < 2 {
		return "" // no resource part, e.g. not an ARN at all.
	}
	// The resource part is "family:revision".
	fields := strings.Split(segments[1], ":")
	if len(fields) < 2 {
		return "" // no revision suffix.
	}
	return fields[1]
}
// isFailureServiceEvent reports whether the service event message contains any
// of the known failure keywords (case-sensitive substring match).
func isFailureServiceEvent(msg string) bool {
	for i := range ecsEventFailureKeywords {
		if strings.Contains(msg, ecsEventFailureKeywords[i]) {
			return true
		}
	}
	return false
}
// isDeploymentDone reports whether the watched rollout has finished:
// the deployment must be the PRIMARY one, must have been updated at or after
// the deployment start time, and must have reached a terminal state.
func isDeploymentDone(d ECSDeployment, startTime time.Time) bool {
	return d.isPrimary() && !d.UpdatedAt.Before(startTime) && d.done()
}
| 248 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"errors"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudwatch"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
awsecs "github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/stretchr/testify/require"
)
// mockECS is a test double returning a canned service description or error.
type mockECS struct {
	out *ecs.Service
	err error
}

// mockCW is a test double returning canned alarm statuses or an error.
type mockCW struct {
	out []cloudwatch.AlarmStatus
	err error
}

// Service ignores its arguments and returns the stubbed description and error.
func (m mockECS) Service(clusterName, serviceName string) (*ecs.Service, error) {
	return m.out, m.err
}

// AlarmStatuses ignores its options and returns the stubbed statuses and error.
func (m mockCW) AlarmStatuses(opts ...cloudwatch.DescribeAlarmOpts) ([]cloudwatch.AlarmStatus, error) {
	return m.out, m.err
}
// TestECSDeploymentStreamer_Subscribe verifies that active streamers register
// new channels while finished streamers hand back closed ones.
func TestECSDeploymentStreamer_Subscribe(t *testing.T) {
	t.Run("allow new subscriptions if stack streamer is still active", func(t *testing.T) {
		// GIVEN
		streamer := &ECSDeploymentStreamer{}

		// WHEN
		_ = streamer.Subscribe()
		_ = streamer.Subscribe()

		// THEN
		require.Equal(t, 2, len(streamer.subscribers), "expected number of subscribers to match")
	})
	t.Run("new subscriptions on a finished stack streamer should return closed channels", func(t *testing.T) {
		// GIVEN: a streamer that already closed.
		streamer := &ECSDeploymentStreamer{isDone: true}

		// WHEN
		ch := streamer.Subscribe()
		_, ok := <-ch // receive on a closed channel reports ok == false.

		// THEN
		require.False(t, ok, "channel should be closed")
	})
}
// TestECSDeploymentStreamer_Fetch covers Fetch's error propagation, the
// conversion of API deployments/alarms/events into ECSService descriptions,
// and the filtering of service events by creation time and seen IDs.
func TestECSDeploymentStreamer_Fetch(t *testing.T) {
	t.Run("returns a wrapped error on describe service call failure", func(t *testing.T) {
		// GIVEN
		m := mockECS{
			err: errors.New("some error"),
		}
		cw := mockCW{}
		streamer := NewECSDeploymentStreamer(m, cw, "my-cluster", "my-svc", time.Now())

		// WHEN
		_, _, err := streamer.Fetch()

		// THEN
		require.EqualError(t, err, "fetch service description: some error")
	})
	t.Run("returns a wrapped error on alarm statuses call failure", func(t *testing.T) {
		// GIVEN: alarms are enabled so the CloudWatch call is attempted.
		m := mockECS{
			out: &ecs.Service{
				DeploymentConfiguration: &awsecs.DeploymentConfiguration{
					Alarms: &awsecs.DeploymentAlarms{
						AlarmNames: []*string{aws.String("alarm1"), aws.String("alarm2")},
						Enable:     aws.Bool(true),
						Rollback:   aws.Bool(true),
					},
				},
			},
		}
		cw := mockCW{
			err: errors.New("some error"),
		}
		streamer := NewECSDeploymentStreamer(m, cw, "my-cluster", "my-svc", time.Now())

		// WHEN
		_, _, err := streamer.Fetch()

		// THEN
		require.EqualError(t, err, "retrieve alarm statuses: some error")
	})
	t.Run("stores events, alarms, and failures until deployment is done", func(t *testing.T) {
		// GIVEN: a completed primary deployment, a failed old deployment,
		// enabled rollback alarms, and two failure service events.
		oldStartDate := time.Date(2020, time.November, 23, 17, 0, 0, 0, time.UTC)
		startDate := time.Date(2020, time.November, 23, 18, 0, 0, 0, time.UTC)
		m := mockECS{
			out: &ecs.Service{
				Deployments: []*awsecs.Deployment{
					{
						DesiredCount:   aws.Int64(10),
						FailedTasks:    aws.Int64(0),
						PendingCount:   aws.Int64(0),
						RolloutState:   aws.String("COMPLETED"),
						RunningCount:   aws.Int64(10),
						Status:         aws.String("PRIMARY"),
						TaskDefinition: aws.String("arn:aws:ecs:us-west-2:1111:task-definition/myapp-test-mysvc:2"),
						UpdatedAt:      aws.Time(startDate),
					},
					{
						DesiredCount:   aws.Int64(10),
						FailedTasks:    aws.Int64(10),
						PendingCount:   aws.Int64(0),
						RolloutState:   aws.String("FAILED"),
						RunningCount:   aws.Int64(0),
						Status:         aws.String("ACTIVE"),
						TaskDefinition: aws.String("arn:aws:ecs:us-west-2:1111:task-definition/myapp-test-mysvc:1"),
						UpdatedAt:      aws.Time(oldStartDate),
					},
				},
				DeploymentConfiguration: &awsecs.DeploymentConfiguration{
					Alarms: &awsecs.DeploymentAlarms{
						AlarmNames: []*string{aws.String("alarm1"), aws.String("alarm2")},
						Enable:     aws.Bool(true),
						Rollback:   aws.Bool(true),
					},
				},
				Events: []*awsecs.ServiceEvent{
					{
						CreatedAt: aws.Time(startDate),
						Id:        aws.String("id1"),
						Message:   aws.String("deployment failed: alarm detected"),
					},
					{
						CreatedAt: aws.Time(startDate),
						Id:        aws.String("id2"),
						Message:   aws.String("rolling back to deployment X"),
					},
				},
			},
		}
		cw := mockCW{
			out: []cloudwatch.AlarmStatus{
				{
					Name:   "alarm1",
					Status: "OK",
				},
				{
					Name:   "alarm2",
					Status: "ALARM",
				},
			},
		}
		streamer := NewECSDeploymentStreamer(m, cw, "my-cluster", "my-svc", startDate)

		// WHEN
		_, done, err := streamer.Fetch()

		// THEN: one ECSService description is buffered with both deployments,
		// both alarm statuses, and both failure messages.
		require.NoError(t, err)
		require.Equal(t, []ECSService{
			{
				Deployments: []ECSDeployment{
					{
						Status:          "PRIMARY",
						TaskDefRevision: "2",
						DesiredCount:    10,
						RunningCount:    10,
						FailedCount:     0,
						PendingCount:    0,
						RolloutState:    "COMPLETED",
						UpdatedAt:       startDate,
					},
					{
						Status:          "ACTIVE",
						TaskDefRevision: "1",
						DesiredCount:    10,
						RunningCount:    0,
						FailedCount:     10,
						PendingCount:    0,
						RolloutState:    "FAILED",
						UpdatedAt:       oldStartDate,
					},
				},
				Alarms: []cloudwatch.AlarmStatus{
					{
						Name:   "alarm1",
						Status: "OK",
					},
					{
						Name:   "alarm2",
						Status: "ALARM",
					},
				},
				LatestFailureEvents: []string{"deployment failed: alarm detected", "rolling back to deployment X"},
			},
		}, streamer.eventsToFlush)
		require.True(t, done, "there should be no more work to do since the deployment is completed")
	})
	t.Run("stores events until deployment is done without circuit breaker", func(t *testing.T) {
		// GIVEN: no rollout state set (circuit breaker disabled); done is
		// decided by desired == running on the primary deployment.
		oldStartDate := time.Date(2020, time.November, 23, 17, 0, 0, 0, time.UTC)
		startDate := time.Date(2020, time.November, 23, 18, 0, 0, 0, time.UTC)
		m := mockECS{
			out: &ecs.Service{
				Deployments: []*awsecs.Deployment{
					{
						DesiredCount:   aws.Int64(10),
						FailedTasks:    aws.Int64(0),
						PendingCount:   aws.Int64(0),
						RunningCount:   aws.Int64(10),
						Status:         aws.String("PRIMARY"),
						TaskDefinition: aws.String("arn:aws:ecs:us-west-2:1111:task-definition/myapp-test-mysvc:2"),
						UpdatedAt:      aws.Time(startDate),
					},
					{
						DesiredCount:   aws.Int64(10),
						FailedTasks:    aws.Int64(10),
						PendingCount:   aws.Int64(0),
						RunningCount:   aws.Int64(0),
						Status:         aws.String("ACTIVE"),
						TaskDefinition: aws.String("arn:aws:ecs:us-west-2:1111:task-definition/myapp-test-mysvc:1"),
						UpdatedAt:      aws.Time(oldStartDate),
					},
				},
				DeploymentConfiguration: &awsecs.DeploymentConfiguration{
					DeploymentCircuitBreaker: &awsecs.DeploymentCircuitBreaker{
						Enable:   aws.Bool(false),
						Rollback: aws.Bool(true),
					},
				},
			},
		}
		cw := mockCW{}
		streamer := NewECSDeploymentStreamer(m, cw, "my-cluster", "my-svc", startDate)

		// WHEN
		_, done, err := streamer.Fetch()

		// THEN
		require.NoError(t, err)
		require.Equal(t, []ECSService{
			{
				Deployments: []ECSDeployment{
					{
						Status:          "PRIMARY",
						TaskDefRevision: "2",
						DesiredCount:    10,
						RunningCount:    10,
						FailedCount:     0,
						PendingCount:    0,
						UpdatedAt:       startDate,
					},
					{
						Status:          "ACTIVE",
						TaskDefRevision: "1",
						DesiredCount:    10,
						RunningCount:    0,
						FailedCount:     10,
						PendingCount:    0,
						UpdatedAt:       oldStartDate,
					},
				},
				LatestFailureEvents: nil,
			},
		}, streamer.eventsToFlush)
		require.True(t, done, "there should be no more work to do since the deployment is completed")
	})
	t.Run("stores only failure event messages", func(t *testing.T) {
		// GIVEN: a mix of failure and success service events; only the
		// failure messages should be kept.
		startDate := time.Date(2020, time.November, 23, 18, 0, 0, 0, time.UTC)
		m := mockECS{
			out: &ecs.Service{
				Events: []*awsecs.ServiceEvent{
					{
						// Failure event
						Id:        aws.String("1"),
						Message:   aws.String("(service my-svc) deployment ecs-svc/0205655736282798388 deployment failed: alarm detected."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("2"),
						Message:   aws.String("(service my-svc) rolling back to deployment ecs-svc/9086637243870003494."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("3"),
						Message:   aws.String("(service my-svc) failed to register targets in (target-group 1234) with (error some-error)"),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Success event
						Id:        aws.String("4"),
						Message:   aws.String("(service my-svc) registered 1 targets in (target-group 1234)"),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("5"),
						Message:   aws.String("(service my-svc) failed to launch a task with (error some-error)."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("6"),
						Message:   aws.String("(service my-svc) (task 1234) failed container health checks."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Success event
						Id:        aws.String("7"),
						Message:   aws.String("(service my-svc) has started 1 tasks: (task 1234)."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("8"),
						Message:   aws.String("(service my-svc) (deployment 123) deployment failed: some-error."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("9"),
						Message:   aws.String("(service my-svc) was unable to place a task."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
					{
						// Failure event
						Id:        aws.String("10"),
						Message:   aws.String("(service my-svc) (port 80) is unhealthy in (target-group 1234) due to (reason some-error)."),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
				},
			},
		}
		streamer := &ECSDeploymentStreamer{
			client:                 m,
			clock:                  fakeClock{startDate},
			rand:                   func(n int) int { return n },
			cluster:                "my-cluster",
			service:                "my-svc",
			deploymentCreationTime: startDate,
			pastEventIDs:           make(map[string]bool),
		}

		// WHEN
		_, _, err := streamer.Fetch()

		// THEN: the two success messages (ids 4 and 7) are filtered out.
		require.NoError(t, err)
		require.Equal(t, []ECSService{
			{
				LatestFailureEvents: []string{
					"(service my-svc) deployment ecs-svc/0205655736282798388 deployment failed: alarm detected.",
					"(service my-svc) rolling back to deployment ecs-svc/9086637243870003494.",
					"(service my-svc) failed to register targets in (target-group 1234) with (error some-error)",
					"(service my-svc) failed to launch a task with (error some-error).",
					"(service my-svc) (task 1234) failed container health checks.",
					"(service my-svc) (deployment 123) deployment failed: some-error.",
					"(service my-svc) was unable to place a task.",
					"(service my-svc) (port 80) is unhealthy in (target-group 1234) due to (reason some-error).",
				},
			},
		}, streamer.eventsToFlush)
	})
	t.Run("ignores failure events before deployment creation time", func(t *testing.T) {
		// GIVEN: one failure event created an hour before the deployment.
		startDate := time.Date(2020, time.November, 23, 18, 0, 0, 0, time.UTC)
		m := mockECS{
			out: &ecs.Service{
				Events: []*awsecs.ServiceEvent{
					{
						// Failure event
						Id:        aws.String("1"),
						Message:   aws.String("(service my-svc) failed to register targets in (target-group 1234) with (error some-error)"),
						CreatedAt: aws.Time(time.Date(2020, time.November, 23, 17, 0, 0, 0, time.UTC)),
					},
				},
			},
		}
		streamer := &ECSDeploymentStreamer{
			client:                 m,
			clock:                  fakeClock{startDate},
			rand:                   func(n int) int { return n },
			cluster:                "my-cluster",
			service:                "my-svc",
			deploymentCreationTime: startDate,
			pastEventIDs:           make(map[string]bool),
		}

		// WHEN
		_, _, err := streamer.Fetch()

		// THEN
		require.NoError(t, err)
		require.Equal(t, 1, len(streamer.eventsToFlush), "should have only event to flush")
		require.Nil(t, streamer.eventsToFlush[0].LatestFailureEvents, "there should be no failed events emitted")
	})
	t.Run("ignores events that have already been registered", func(t *testing.T) {
		// GIVEN: the single failure event's ID is already in pastEventIDs.
		startDate := time.Date(2020, time.November, 23, 18, 0, 0, 0, time.UTC)
		m := mockECS{
			out: &ecs.Service{
				Events: []*awsecs.ServiceEvent{
					{
						// Failure event
						Id:        aws.String("1"),
						Message:   aws.String("(service my-svc) failed to register targets in (target-group 1234) with (error some-error)"),
						CreatedAt: aws.Time(startDate.Add(1 * time.Minute)),
					},
				},
			},
		}
		streamer := &ECSDeploymentStreamer{
			client:                 m,
			clock:                  fakeClock{startDate},
			rand:                   func(n int) int { return n },
			cluster:                "my-cluster",
			service:                "my-svc",
			deploymentCreationTime: startDate,
			pastEventIDs:           make(map[string]bool),
		}
		streamer.pastEventIDs["1"] = true

		// WHEN
		_, _, err := streamer.Fetch()

		// THEN
		require.NoError(t, err)
		require.Equal(t, 1, len(streamer.eventsToFlush), "should have only one event to flush")
		require.Nil(t, streamer.eventsToFlush[0].LatestFailureEvents, "there should be no failed events emitted")
	})
}
// TestECSDeploymentStreamer_Notify checks that Notify delivers every buffered
// description to the subscribed channel.
func TestECSDeploymentStreamer_Notify(t *testing.T) {
	// GIVEN: one buffered description and a subscriber with buffer capacity.
	wantedEvents := []ECSService{
		{
			Deployments: []ECSDeployment{
				{
					Status: "PRIMARY",
				},
				{
					Status: "ACTIVE",
				},
			},
		},
	}
	sub := make(chan ECSService, 2)
	streamer := &ECSDeploymentStreamer{
		subscribers:   []chan ECSService{sub},
		eventsToFlush: wantedEvents,
		clock:         fakeClock{fakeNow: time.Now()},
		rand:          func(n int) int { return n },
	}

	// WHEN
	streamer.Notify()
	close(sub) // Close the channel to stop expecting to receive new events.

	// THEN: drain the channel and compare contents.
	var actualEvents []ECSService
	for event := range sub {
		actualEvents = append(actualEvents, event)
	}
	require.ElementsMatch(t, wantedEvents, actualEvents)
}
// TestECSDeploymentStreamer_Close checks that Close closes subscriber channels
// and flags the streamer so future subscriptions get closed channels.
func TestECSDeploymentStreamer_Close(t *testing.T) {
	// GIVEN
	streamer := &ECSDeploymentStreamer{}
	c := streamer.Subscribe()

	// WHEN
	streamer.Close()

	// THEN
	_, isOpen := <-c
	require.False(t, isOpen, "expected subscribed channels to be closed")
	require.True(t, streamer.isDone, "should mark the streamer that it won't allow new subscribers")
}
| 489 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package stream implements streamers that publish AWS events periodically.
// A streamer fetches AWS events periodically and notifies subscribed channels of them.
package stream
import (
"context"
"time"
)
const (
	// Backoff knobs for nextFetchDate, all in milliseconds.
	streamerFetchIntervalDurationMs    = 4000  // How long to wait in milliseconds until Fetch is called again for a Streamer.
	streamerMaxFetchIntervalDurationMs = 32000 // The maximum duration that a client should wait until Fetch is called again.
	streamerMinFetchIntervalDurationMs = 1000  // The minimum duration that a client should wait until Fetch is called again.
)
// Streamer is the interface that groups methods to periodically retrieve events,
// publish them to subscribers, and stop publishing once there are no more events left.
type Streamer interface {
	// Fetch fetches events, updates the internal state of the Streamer with new events and returns the next time
	// the Fetch call should be attempted. On failure, Fetch returns an error.
	// done is true once the underlying resource reached a terminal state.
	Fetch() (next time.Time, done bool, err error)

	// Notify publishes all new event updates to subscribers.
	Notify()

	// Close notifies all subscribers that no more events will be sent.
	Close()
}
// Stream streams event updates by calling Fetch followed with Notify until there are no more events left.
// If the context is canceled or Fetch errors, then Stream short-circuits and returns the error.
func Stream(ctx context.Context, streamer Streamer) error {
	defer streamer.Close()

	delay := time.Duration(0) // the first Fetch happens immediately.
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}

		next, done, err := streamer.Fetch()
		if err != nil {
			return err
		}
		streamer.Notify()
		if done {
			return nil
		}
		// Sleep until the date the streamer asked to be polled again.
		delay = time.Until(next)
	}
}
// nextFetchDate returns a time to wait using random jitter and exponential backoff.
// The wait is a random number of milliseconds in
// [streamerMinFetchIntervalDurationMs, cap], where cap doubles with every retry
// (streamerFetchIntervalDurationMs * 2^retries) up to
// streamerMaxFetchIntervalDurationMs.
func nextFetchDate(clock clock, rand func(int) int, retries int) time.Time {
	// Exponential backoff, capped at the maximum interval.
	capMs := streamerFetchIntervalDurationMs * (1 << retries)
	if capMs > streamerMaxFetchIntervalDurationMs {
		capMs = streamerMaxFetchIntervalDurationMs
	}
	// Random jitter between the minimum interval and the cap.
	// See https://www.educative.io/answers/how-to-generate-random-numbers-in-a-given-range-in-go
	waitMs := rand(capMs-streamerMinFetchIntervalDurationMs) + streamerMinFetchIntervalDurationMs
	return clock.now().Add(time.Duration(waitMs) * time.Millisecond)
}
// min returns the smaller of two ints.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
| 81 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"context"
"errors"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// counterStreamer counts the number of times Fetch and Notify are invoked.
type counterStreamer struct {
fetchCount int
notifyCount int
next func() time.Time
}
func (s *counterStreamer) Fetch() (time.Time, bool, error) {
s.fetchCount += 1
return s.next(), false, nil
}
func (s *counterStreamer) Notify() {
s.notifyCount += 1
}
func (s *counterStreamer) Close() {}
// errStreamer is a test double whose Fetch always reports the stored error.
type errStreamer struct {
	err error
}

func (s *errStreamer) Fetch() (time.Time, bool, error) {
	return time.Now(), false, s.err
}

func (s *errStreamer) Notify() {}

func (s *errStreamer) Close() {}
// TestStream verifies the retry/cancellation contract of Stream and the
// backoff schedule computed by nextFetchDate.
func TestStream(t *testing.T) {
	t.Run("returns error from Fetch", func(t *testing.T) {
		// GIVEN
		wantedErr := errors.New("unexpected fetch error")
		streamer := &errStreamer{err: wantedErr}

		// WHEN
		actualErr := Stream(context.Background(), streamer)

		// THEN
		require.EqualError(t, actualErr, wantedErr.Error())
	})

	t.Run("calls Fetch and Notify multiple times until context is canceled", func(t *testing.T) {
		t.Parallel()
		// GIVEN
		ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
		defer cancel()
		streamer := &counterStreamer{
			next: func() time.Time {
				return time.Now().Add(100 * time.Millisecond)
			},
		}

		// WHEN
		err := Stream(ctx, streamer)

		// THEN
		require.EqualError(t, err, ctx.Err().Error(), "the error returned should be context canceled")
		require.Greater(t, streamer.fetchCount, 1, "expected more than one call to Fetch within timeout")
		require.Greater(t, streamer.notifyCount, 1, "expected more than one call to Notify within timeout")
	})

	t.Run("nextFetchDate works correctly to grab times before the timeout.", func(t *testing.T) {
		clock := fakeClock{fakeNow: time.Date(2020, time.November, 1, 0, 0, 0, 0, time.UTC)}
		rand := func(n int) int { return n }
		// With rand returning its upper bound, the wait is exactly
		// min(maxInterval, interval*2^retries).
		// NOTE: testify's require.Equal takes the expected value first, then
		// the actual value; the original had them swapped, which produces
		// misleading failure output.
		intervalNS := int(streamerFetchIntervalDurationMs * time.Millisecond)
		for r := 0; r < 4; r++ {
			a := nextFetchDate(clock, rand, r)
			require.Equal(t, time.Date(2020, time.November, 1, 0, 0, 0, intervalNS*(1<<r), time.UTC), a, "expected the fetch date for %d retries to be %dms in the future", r, streamerFetchIntervalDurationMs*(1<<r))
		}
		maxIntervalNS := int(streamerMaxFetchIntervalDurationMs * time.Millisecond)
		b := nextFetchDate(clock, rand, 10)
		require.Equal(t, time.Date(2020, time.November, 1, 0, 0, 0, maxIntervalNS, time.UTC), b, "expected the fetch date for 10 retries to be capped at the max interval")
	})
}
| 92 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package task
import (
"fmt"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
)
const (
	// fmtErrDefaultSubnets is the error format used when the default VPC's subnet IDs cannot be retrieved.
	fmtErrDefaultSubnets = "get default subnet IDs: %w"
)

// ConfigRunner runs an Amazon ECS task in the subnets, security groups, and cluster.
// It uses the default subnets and the default cluster if the corresponding field is empty.
type ConfigRunner struct {
	// Count of the tasks to be launched.
	Count int
	// Group Name of the tasks that use the same task definition.
	GroupName string

	// The ARN of the cluster to run the task.
	Cluster string

	// Network configuration
	Subnets        []string
	SecurityGroups []string

	// Interfaces to interact with dependencies. Must not be nil.
	ClusterGetter DefaultClusterGetter
	Starter       Runner

	// Must not be nil if using default subnets.
	VPCGetter VPCGetter

	// Figures non-zero exit code of the task.
	NonZeroExitCodeGetter NonZeroExitCodeGetter

	// Platform configuration
	OS string
}
// Run launches the requested number of ECS tasks and returns them.
// An empty Cluster falls back to the account's default cluster, and nil
// Subnets fall back to the default VPC's subnets; the resolved values are
// stored back on the receiver (CheckNonZeroExitCode reads r.Cluster later).
func (r *ConfigRunner) Run() ([]*Task, error) {
	if err := r.validateDependencies(); err != nil {
		return nil, err
	}
	if r.Cluster == "" {
		defaultCluster, err := r.ClusterGetter.DefaultCluster()
		if err != nil {
			return nil, &errGetDefaultCluster{parentErr: err}
		}
		r.Cluster = defaultCluster
	}
	if r.Subnets == nil {
		defaultSubnets, err := r.VPCGetter.SubnetIDs(ec2.FilterForDefaultVPCSubnets)
		if err != nil {
			return nil, fmt.Errorf(fmtErrDefaultSubnets, err)
		}
		if len(defaultSubnets) == 0 {
			return nil, errNoSubnetFound
		}
		r.Subnets = defaultSubnets
	}
	// Windows tasks pin platform version 1.0.0; everything else uses LATEST.
	platformVersion := "LATEST"
	if IsValidWindowsOS(r.OS) {
		platformVersion = "1.0.0"
	}
	launched, err := r.Starter.RunTask(ecs.RunTaskInput{
		Cluster:         r.Cluster,
		Count:           r.Count,
		Subnets:         r.Subnets,
		SecurityGroups:  r.SecurityGroups,
		TaskFamilyName:  taskFamilyName(r.GroupName),
		StartedBy:       startedBy,
		PlatformVersion: platformVersion,
		EnableExec:      true,
	})
	if err != nil {
		return nil, &errRunTask{
			groupName: r.GroupName,
			parentErr: err,
		}
	}
	return convertECSTasks(launched), nil
}
// validateDependencies returns an error if any collaborator required by Run
// has not been set on the receiver.
func (r *ConfigRunner) validateDependencies() error {
	switch {
	case r.ClusterGetter == nil:
		return errClusterGetterNil
	case r.Starter == nil:
		return errStarterNil
	}
	return nil
}
// CheckNonZeroExitCode returns the status of the containers part of the given tasks.
// It queries the cluster resolved by the most recent Run call.
func (r *ConfigRunner) CheckNonZeroExitCode(tasks []*Task) error {
	arns := make([]string, len(tasks))
	for i := range tasks {
		arns[i] = tasks[i].TaskARN
	}
	return r.NonZeroExitCodeGetter.HasNonZeroExitCode(arns, r.Cluster)
}
| 118 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package task
import (
"errors"
"fmt"
"testing"
awsecs "github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/task/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
const (
	// attachmentTypeName is the ECS task attachment type that carries ENI details.
	attachmentTypeName = "ElasticNetworkInterface"
	// detailsKeyName is the attachment detail key whose value is the ENI ID.
	detailsKeyName = "networkInterfaceId"
)

// taskWithENI is a fixture ECS task exposing ENI "eni-1" through its attachments.
var taskWithENI = ecs.Task{
	TaskArn: aws.String("task-1"),
	Attachments: []*awsecs.Attachment{
		{
			Type: aws.String(attachmentTypeName),
			Details: []*awsecs.KeyValuePair{
				{
					Name:  aws.String(detailsKeyName),
					Value: aws.String("eni-1"),
				},
			},
		},
	},
}

// taskWithNoENI is a fixture ECS task without any ENI attachment.
var taskWithNoENI = ecs.Task{
	TaskArn: aws.String("task-2"),
}
// TestNetworkConfigRunner_Run exercises ConfigRunner.Run with gomock doubles:
// falling back to the default cluster and default subnets, passing through
// explicit network settings, the "1.0.0" platform version for Windows OSes,
// tasks that lack an ENI attachment, and error propagation from each
// dependency.
func TestNetworkConfigRunner_Run(t *testing.T) {
	testCases := map[string]struct {
		count     int
		groupName string

		cluster        string
		subnets        []string
		securityGroups []string

		os   string
		arch string

		mockClusterGetter func(m *mocks.MockDefaultClusterGetter)
		mockStarter       func(m *mocks.MockRunner)
		MockVPCGetter     func(m *mocks.MockVPCGetter)

		wantedError error
		wantedTasks []*Task
	}{
		"failed to get default cluster": {
			subnets: []string{"subnet-1", "subnet-2"},
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("", errors.New("error getting default cluster"))
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs().AnyTimes()
				m.EXPECT().SecurityGroups().AnyTimes()
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(gomock.Any()).Times(0)
			},
			wantedError: &errGetDefaultCluster{
				parentErr: errors.New("error getting default cluster"),
			},
		},
		"failed to kick off tasks with input subnets and security groups": {
			count:          1,
			groupName:      "my-task",
			subnets:        []string{"subnet-1", "subnet-2"},
			securityGroups: []string{"sg-1", "sg-2"},
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).Times(0)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(gomock.Any()).Return(nil, errors.New("error running task"))
			},
			wantedError: &errRunTask{
				groupName: "my-task",
				parentErr: errors.New("error running task"),
			},
		},
		"successfully kick off task with both input subnets and security groups": {
			count:          1,
			groupName:      "my-task",
			subnets:        []string{"subnet-1", "subnet-2"},
			securityGroups: []string{"sg-1", "sg-2"},
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).Times(0)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-1", "subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"failed to get default subnets": {
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().AnyTimes()
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).Return(nil, errors.New("error getting subnets"))
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(gomock.Any()).Times(0)
			},
			wantedError: fmt.Errorf(fmtErrDefaultSubnets, errors.New("error getting subnets")),
		},
		"successfully kick off task with default subnets": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-2"},
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).
					Return([]string{"default-subnet-1", "default-subnet-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"default-subnet-1", "default-subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"eni information not found for several tasks": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-2"},
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).
					Return([]string{"default-subnet-1", "default-subnet-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"default-subnet-1", "default-subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{
					&taskWithENI,
					&taskWithNoENI,
					&taskWithNoENI,
				}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
				{
					TaskARN: "task-2",
				},
				{
					TaskARN: "task-2",
				},
			},
		},
		"successfully kick off task with specified cluster": {
			count:          1,
			groupName:      "my-task",
			cluster:        "special-cluster",
			subnets:        []string{"subnet-1", "subnet-2"},
			securityGroups: []string{"sg-1", "sg-2"},
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Times(0)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).Times(0)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "special-cluster",
					Count:           1,
					Subnets:         []string{"subnet-1", "subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"successfully kick off task with platform version for windows 2019 core": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-2"},
			os:             "WINDOWS_SERVER_2019_CORE",
			arch:           "X86_64",
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).
					Return([]string{"default-subnet-1", "default-subnet-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"default-subnet-1", "default-subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"successfully kick off task with platform version for windows 2019 full": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-2"},
			os:             "WINDOWS_SERVER_2019_FULL",
			arch:           "X86_64",
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).
					Return([]string{"default-subnet-1", "default-subnet-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"default-subnet-1", "default-subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"successfully kick off task with platform version for windows 2022 core": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-2"},
			os:             "WINDOWS_SERVER_2022_CORE",
			arch:           "X86_64",
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).
					Return([]string{"default-subnet-1", "default-subnet-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"default-subnet-1", "default-subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"successfully kick off task with platform version for windows 2022 full": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-2"},
			os:             "WINDOWS_SERVER_2022_FULL",
			arch:           "X86_64",
			mockClusterGetter: func(m *mocks.MockDefaultClusterGetter) {
				m.EXPECT().DefaultCluster().Return("cluster-1", nil)
			},
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SubnetIDs([]ec2.Filter{ec2.FilterForDefaultVPCSubnets}).
					Return([]string{"default-subnet-1", "default-subnet-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"default-subnet-1", "default-subnet-2"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
	}

	// Each case wires a ConfigRunner with gomock doubles and compares the
	// returned tasks or the returned error message.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()

			MockVPCGetter := mocks.NewMockVPCGetter(ctrl)
			mockClusterGetter := mocks.NewMockDefaultClusterGetter(ctrl)
			mockStarter := mocks.NewMockRunner(ctrl)

			tc.MockVPCGetter(MockVPCGetter)
			tc.mockClusterGetter(mockClusterGetter)
			tc.mockStarter(mockStarter)

			task := &ConfigRunner{
				Count:          tc.count,
				GroupName:      tc.groupName,
				Cluster:        tc.cluster,
				Subnets:        tc.subnets,
				SecurityGroups: tc.securityGroups,
				VPCGetter:      MockVPCGetter,
				ClusterGetter:  mockClusterGetter,
				Starter:        mockStarter,
				OS:             tc.os,
			}

			tasks, err := task.Run()
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedTasks, tasks)
			}
		})
	}
}
| 441 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package task
import (
"fmt"
"strings"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/deploy"
)
const (
	// Error formats for environment-based task launches.
	fmtErrSecurityGroupsFromEnv = "get security groups from environment %s: %w"
	fmtErrDescribeEnvironment   = "describe environment %s: %w"
	fmtErrNumSecurityGroups     = "unable to run task with more than 5 security groups: (%d) %s"

	// Tag key/value that identify the environment's security group by its
	// CloudFormation logical ID.
	envSecurityGroupCFNLogicalIDTagKey   = "aws:cloudformation:logical-id"
	envSecurityGroupCFNLogicalIDTagValue = "EnvironmentSecurityGroup"
)

// Names for tag filters
var (
	fmtTagFilterForApp = fmt.Sprintf(ec2.FmtTagFilter, deploy.AppTagKey)
	fmtTagFilterForEnv = fmt.Sprintf(ec2.FmtTagFilter, deploy.EnvTagKey)
)
// EnvRunner can run an Amazon ECS task in the VPC and the cluster of an environment.
type EnvRunner struct {
	// Count of the tasks to be launched.
	Count int
	// Group Name of the tasks that use the same task definition.
	GroupName string

	// App and Env in which the tasks will be launched.
	App string
	Env string

	// Extra security groups to use. Merged (deduplicated) with the
	// environment's security group; at most 5 groups total are allowed.
	SecurityGroups []string

	// Platform configuration.
	OS string

	// Interfaces to interact with dependencies. Must not be nil.
	VPCGetter            VPCGetter
	ClusterGetter        ClusterGetter
	Starter              Runner
	EnvironmentDescriber environmentDescriber

	// Figures non-zero exit code of the task
	NonZeroExitCodeGetter NonZeroExitCodeGetter
}
// Run runs tasks in the environment of the application, and returns the tasks.
// Tasks are placed in the environment's public subnets, guarded by the
// environment security group plus any extra groups configured on the runner
// (capped at 5 total).
func (r *EnvRunner) Run() ([]*Task, error) {
	if err := r.validateDependencies(); err != nil {
		return nil, err
	}
	cluster, err := r.ClusterGetter.ClusterARN(r.App, r.Env)
	if err != nil {
		return nil, fmt.Errorf("get cluster for environment %s: %w", r.Env, err)
	}
	env, err := r.EnvironmentDescriber.Describe()
	if err != nil {
		return nil, fmt.Errorf(fmtErrDescribeEnvironment, r.Env, err)
	}
	subnets := env.EnvironmentVPC.PublicSubnetIDs
	if len(subnets) == 0 {
		return nil, errNoSubnetFound
	}
	// Use only the environment security group: https://github.com/aws/copilot-cli/issues/1882.
	sgFilters := append(r.filtersForVPCFromAppEnv(), ec2.Filter{
		Name:   fmt.Sprintf(ec2.FmtTagFilter, envSecurityGroupCFNLogicalIDTagKey),
		Values: []string{envSecurityGroupCFNLogicalIDTagValue},
	})
	securityGroups, err := r.VPCGetter.SecurityGroups(sgFilters...)
	if err != nil {
		return nil, fmt.Errorf(fmtErrSecurityGroupsFromEnv, r.Env, err)
	}
	securityGroups = appendUniqueStrings(securityGroups, r.SecurityGroups...)
	if n := len(securityGroups); n > 5 {
		return nil, fmt.Errorf(fmtErrNumSecurityGroups, n, strings.Join(securityGroups, ","))
	}

	// Windows tasks pin platform version 1.0.0; everything else uses LATEST.
	platformVersion := "LATEST"
	if IsValidWindowsOS(r.OS) {
		platformVersion = "1.0.0"
	}
	ecsTasks, err := r.Starter.RunTask(ecs.RunTaskInput{
		Cluster:         cluster,
		Count:           r.Count,
		Subnets:         subnets,
		SecurityGroups:  securityGroups,
		TaskFamilyName:  taskFamilyName(r.GroupName),
		StartedBy:       startedBy,
		PlatformVersion: platformVersion,
		EnableExec:      true,
	})
	if err != nil {
		return nil, &errRunTask{
			groupName: r.GroupName,
			parentErr: err,
		}
	}
	return convertECSTasks(ecsTasks), nil
}
// filtersForVPCFromAppEnv returns the EC2 tag filters (env first, then app)
// that scope VPC queries to this runner's application and environment.
func (r *EnvRunner) filtersForVPCFromAppEnv() []ec2.Filter {
	return []ec2.Filter{
		{Name: fmtTagFilterForEnv, Values: []string{r.Env}},
		{Name: fmtTagFilterForApp, Values: []string{r.App}},
	}
}
// validateDependencies ensures the collaborators dereferenced by Run are set,
// returning a descriptive error otherwise.
func (r *EnvRunner) validateDependencies() error {
	if r.VPCGetter == nil {
		return errVPCGetterNil
	}
	if r.ClusterGetter == nil {
		return errClusterGetterNil
	}
	if r.Starter == nil {
		return errStarterNil
	}
	// Run also calls EnvironmentDescriber.Describe; validate it here so a
	// missing describer surfaces as an error instead of a nil-pointer panic.
	if r.EnvironmentDescriber == nil {
		return fmt.Errorf("environment describer is not set")
	}
	return nil
}
// appendUniqueStrings appends to s1 every element of s2 that is not already
// present, preserving order and skipping duplicates (including duplicates
// within s2 itself).
func appendUniqueStrings(s1 []string, s2 ...string) []string {
	for _, candidate := range s2 {
		seen := false
		for _, existing := range s1 {
			if existing == candidate {
				seen = true
				break
			}
		}
		if !seen {
			s1 = append(s1, candidate)
		}
	}
	return s1
}
// containsString reports whether search appears in s.
func containsString(s []string, search string) bool {
	for i := range s {
		if s[i] == search {
			return true
		}
	}
	return false
}
// CheckNonZeroExitCode returns the status of the containers part of the given tasks.
// The environment's cluster ARN is resolved first so the query targets the
// right cluster.
func (r *EnvRunner) CheckNonZeroExitCode(tasks []*Task) error {
	cluster, err := r.ClusterGetter.ClusterARN(r.App, r.Env)
	if err != nil {
		return fmt.Errorf("get cluster for environment %s: %w", r.Env, err)
	}
	arns := make([]string, len(tasks))
	for i := range tasks {
		arns[i] = tasks[i].TaskARN
	}
	return r.NonZeroExitCodeGetter.HasNonZeroExitCode(arns, cluster)
}
| 175 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package task
import (
"errors"
"fmt"
"testing"
awsecs "github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/task/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
// TestEnvRunner_Run exercises EnvRunner.Run with gomock doubles: resolving
// the environment cluster and public subnets, merging extra security groups
// with the environment security group (and the 5-group cap), the "1.0.0"
// platform version for Windows OSes, tasks without ENI attachments, and error
// propagation from each dependency.
func TestEnvRunner_Run(t *testing.T) {
	inApp := "my-app"
	inEnv := "my-env"

	filtersForSubnetID := []ec2.Filter{
		{
			Name:   fmtTagFilterForEnv,
			Values: []string{inEnv},
		},
		{
			Name:   fmtTagFilterForApp,
			Values: []string{inApp},
		},
	}
	filtersForSecurityGroup := append(filtersForSubnetID, ec2.Filter{
		Name:   "tag:aws:cloudformation:logical-id",
		Values: []string{"EnvironmentSecurityGroup"},
	})

	mockClusterGetter := func(m *mocks.MockClusterGetter) {
		m.EXPECT().ClusterARN(inApp, inEnv).Return("cluster-1", nil)
	}
	mockVPCGetterAny := func(m *mocks.MockVPCGetter) {
		m.EXPECT().SubnetIDs(gomock.Any()).AnyTimes()
		m.EXPECT().SecurityGroups(gomock.Any()).AnyTimes()
	}
	mockStarterNotRun := func(m *mocks.MockRunner) {
		m.EXPECT().RunTask(gomock.Any()).Times(0)
	}
	mockEnvironmentDescriberAny := func(m *mocks.MockenvironmentDescriber) {
		m.EXPECT().Describe().AnyTimes()
	}
	mockEnvironmentDescriberValid := func(m *mocks.MockenvironmentDescriber) {
		m.EXPECT().Describe().Return(&describe.EnvDescription{
			EnvironmentVPC: describe.EnvironmentVPC{
				ID:               "vpc-012abcd345",
				PublicSubnetIDs:  []string{"subnet-0789ab", "subnet-0123cd"},
				PrivateSubnetIDs: []string{"subnet-023ff", "subnet-04af"},
			},
		}, nil)
	}

	// NOTE: plain assignments — these overwrite the package-level fixtures
	// declared in the sibling test file for this package.
	taskWithENI = ecs.Task{
		TaskArn: aws.String("task-1"),
		Attachments: []*awsecs.Attachment{
			{
				Type: aws.String(attachmentTypeName),
				Details: []*awsecs.KeyValuePair{
					{
						Name:  aws.String(detailsKeyName),
						Value: aws.String("eni-1"),
					},
				},
			},
		},
	}
	taskWithNoENI = ecs.Task{
		TaskArn: aws.String("task-2"),
	}

	testCases := map[string]struct {
		count     int
		groupName string

		os   string
		arch string

		securityGroups []string

		MockVPCGetter            func(m *mocks.MockVPCGetter)
		MockClusterGetter        func(m *mocks.MockClusterGetter)
		mockStarter              func(m *mocks.MockRunner)
		mockEnvironmentDescriber func(m *mocks.MockenvironmentDescriber)

		wantedError error
		wantedTasks []*Task
	}{
		"failed to get cluster": {
			MockClusterGetter: func(m *mocks.MockClusterGetter) {
				m.EXPECT().ClusterARN(inApp, inEnv).Return("", errors.New("error getting resources"))
			},
			MockVPCGetter:            mockVPCGetterAny,
			mockStarter:              mockStarterNotRun,
			mockEnvironmentDescriber: mockEnvironmentDescriberAny,
			wantedError:              fmt.Errorf("get cluster for environment %s: %w", inEnv, errors.New("error getting resources")),
		},
		"failed to get env description": {
			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(gomock.Any()).AnyTimes()
			},
			mockStarter: mockStarterNotRun,
			mockEnvironmentDescriber: func(m *mocks.MockenvironmentDescriber) {
				m.EXPECT().Describe().Return(nil, errors.New("error getting env description"))
			},
			wantedError: fmt.Errorf(fmtErrDescribeEnvironment, inEnv, errors.New("error getting env description")),
		},
		"no subnet is found": {
			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(gomock.Any()).AnyTimes()
			},
			mockStarter: mockStarterNotRun,
			mockEnvironmentDescriber: func(m *mocks.MockenvironmentDescriber) {
				m.EXPECT().Describe().Return(&describe.EnvDescription{
					EnvironmentVPC: describe.EnvironmentVPC{
						ID:               "vpc-012abcd345",
						PublicSubnetIDs:  []string{},
						PrivateSubnetIDs: []string{"subnet-023ff", "subnet-04af"},
					},
				}, nil)
			},
			wantedError: errNoSubnetFound,
		},
		"failed to get security groups": {
			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).
					Return(nil, errors.New("error getting security groups"))
			},
			mockStarter:              mockStarterNotRun,
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,
			wantedError:              fmt.Errorf(fmtErrSecurityGroupsFromEnv, inEnv, errors.New("error getting security groups")),
		},
		"failed with too many security groups": {
			securityGroups:    []string{"sg-2", "sg-3", "sg-4", "sg-5", "sg-6"},
			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter:              mockStarterNotRun,
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,
			wantedError:              fmt.Errorf(fmtErrNumSecurityGroups, 6, "sg-1,sg-2,sg-3,sg-4,sg-5,sg-6"),
		},
		"failed to kick off task": {
			count:     1,
			groupName: "my-task",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return(nil, errors.New("error running task"))
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedError: &errRunTask{
				groupName: "my-task",
				parentErr: errors.New("error running task"),
			},
		},
		"run in env success": {
			count:     1,
			groupName: "my-task",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"run in env with extra security groups success": {
			count:          1,
			groupName:      "my-task",
			securityGroups: []string{"sg-1", "sg-extra"},

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2", "sg-extra"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"run in env with windows os success 2019 core": {
			count:     1,
			groupName: "my-task",
			os:        "WINDOWS_SERVER_2019_CORE",
			arch:      "X86_64",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"run in env with windows os success 2019 full": {
			count:     1,
			groupName: "my-task",
			os:        "WINDOWS_SERVER_2019_FULL",
			arch:      "X86_64",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"run in env with windows os success 2022 core": {
			count:     1,
			groupName: "my-task",
			os:        "WINDOWS_SERVER_2022_CORE",
			arch:      "X86_64",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"run in env with windows os success 2022 full": {
			count:     1,
			groupName: "my-task",
			os:        "WINDOWS_SERVER_2022_FULL",
			arch:      "X86_64",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "1.0.0",
					EnableExec:      true,
				}).Return([]*ecs.Task{&taskWithENI}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
			},
		},
		"eni information not found for several tasks": {
			count:     1,
			groupName: "my-task",

			MockClusterGetter: mockClusterGetter,
			MockVPCGetter: func(m *mocks.MockVPCGetter) {
				m.EXPECT().SecurityGroups(filtersForSecurityGroup).Return([]string{"sg-1", "sg-2"}, nil)
			},
			mockStarter: func(m *mocks.MockRunner) {
				m.EXPECT().RunTask(ecs.RunTaskInput{
					Cluster:         "cluster-1",
					Count:           1,
					Subnets:         []string{"subnet-0789ab", "subnet-0123cd"},
					SecurityGroups:  []string{"sg-1", "sg-2"},
					TaskFamilyName:  taskFamilyName("my-task"),
					StartedBy:       startedBy,
					PlatformVersion: "LATEST",
					EnableExec:      true,
				}).Return([]*ecs.Task{
					&taskWithENI,
					&taskWithNoENI,
					&taskWithNoENI,
				}, nil)
			},
			mockEnvironmentDescriber: mockEnvironmentDescriberValid,

			wantedTasks: []*Task{
				{
					TaskARN: "task-1",
					ENI:     "eni-1",
				},
				{
					TaskARN: "task-2",
				},
				{
					TaskARN: "task-2",
				},
			},
		},
	}

	// Each case wires an EnvRunner with gomock doubles and compares the
	// returned tasks or the returned error message.
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()

			MockVPCGetter := mocks.NewMockVPCGetter(ctrl)
			MockClusterGetter := mocks.NewMockClusterGetter(ctrl)
			mockStarter := mocks.NewMockRunner(ctrl)
			mockEnvironmentDescriber := mocks.NewMockenvironmentDescriber(ctrl)

			tc.MockVPCGetter(MockVPCGetter)
			tc.MockClusterGetter(MockClusterGetter)
			tc.mockStarter(mockStarter)
			tc.mockEnvironmentDescriber(mockEnvironmentDescriber)

			task := &EnvRunner{
				Count:     tc.count,
				GroupName: tc.groupName,

				App: inApp,
				Env: inEnv,
				OS:  tc.os,

				SecurityGroups: tc.securityGroups,

				VPCGetter:            MockVPCGetter,
				ClusterGetter:        MockClusterGetter,
				Starter:              mockStarter,
				EnvironmentDescriber: mockEnvironmentDescriber,
			}

			tasks, err := task.Run()
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedTasks, tasks)
			}
		})
	}
}
| 439 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package task
import (
"errors"
"fmt"
)
// Sentinel errors for misconfigured or unusable task runners.
var (
	errNoSubnetFound    = errors.New("no subnets found")          // no subnets matched the provided filters.
	errVPCGetterNil     = errors.New("vpc getter is not set")     // runner built without a VPCGetter.
	errClusterGetterNil = errors.New("cluster getter is not set") // runner built without a ClusterGetter.
	errStarterNil       = errors.New("starter is not set")        // runner built without a task Starter.
)
// errRunTask wraps the underlying error that occurred while attempting to run
// a one-off task group, keeping the group's name for context in the message.
type errRunTask struct {
	groupName string
	parentErr error
}

// Error implements the error interface, prefixing the parent error with the
// task group that failed to run.
func (e *errRunTask) Error() string {
	msg := fmt.Sprintf("run task %s: %v", e.groupName, e.parentErr)
	return msg
}
// errGetDefaultCluster wraps the underlying error from looking up the
// account's default ECS cluster.
type errGetDefaultCluster struct {
	parentErr error
}

// Error implements the error interface.
func (e *errGetDefaultCluster) Error() string {
	msg := fmt.Sprintf("get default cluster: %v", e.parentErr)
	return msg
}
| 35 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package task provides support for running Amazon ECS tasks.
package task
import (
"fmt"
"time"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/aws/aws-sdk-go/aws"
)
// VPCGetter wraps methods of getting VPC info.
type VPCGetter interface {
	// SubnetIDs returns the IDs of subnets matching all the given filters.
	SubnetIDs(filters ...ec2.Filter) ([]string, error)
	// SecurityGroups returns the IDs of security groups matching all the given filters.
	SecurityGroups(filters ...ec2.Filter) ([]string, error)
}

// ClusterGetter wraps the method of getting a cluster ARN.
type ClusterGetter interface {
	ClusterARN(app, env string) (string, error)
}

// DefaultClusterGetter wraps the method of getting a default cluster ARN.
type DefaultClusterGetter interface {
	DefaultCluster() (string, error)
}

// environmentDescriber wraps the method of describing a deployed environment.
type environmentDescriber interface {
	Describe() (*describe.EnvDescription, error)
}

// NonZeroExitCodeGetter wraps the method of getting a non-zero exit code of a task.
type NonZeroExitCodeGetter interface {
	// NOTE(review): parameters are unnamed; presumably task ARNs/IDs and a
	// cluster identifier — confirm against the implementing type.
	HasNonZeroExitCode([]string, string) error
}

// Runner wraps the method of running tasks.
type Runner interface {
	RunTask(input ecs.RunTaskInput) ([]*ecs.Task, error)
}
// Task represents a one-off workload that runs until completed or an error occurs.
type Task struct {
	TaskARN    string     // ARN of the ECS task.
	ClusterARN string     // ARN of the cluster the task runs in.
	StartedAt  *time.Time // When the task started; nil if not yet started.
	ENI        string     // ENI info parsed best-effort from the task; empty when unavailable (see newTaskFromECS).
}
const (
	startedBy = "copilot-task" // StartedBy value attached to every RunTask call so copilot-launched tasks can be identified.

	// Platform options.
	osLinux                 = template.OSLinux
	osWindowsServer2019Full = template.OSWindowsServer2019Full
	osWindowsServer2019Core = template.OSWindowsServer2019Core
	osWindowsServer2022Full = template.OSWindowsServer2022Full
	osWindowsServer2022Core = template.OSWindowsServer2022Core
	archX86                 = template.ArchX86
	archARM64               = template.ArchARM64
)

var (
	// validWindowsOSs lists the Windows OS values accepted by IsValidWindowsOS.
	validWindowsOSs = []string{osWindowsServer2019Core, osWindowsServer2019Full, osWindowsServer2022Core, osWindowsServer2022Full}

	// ValidCFNPlatforms are valid docker platforms for running ECS tasks.
	ValidCFNPlatforms = []string{
		dockerengine.PlatformString(osWindowsServer2019Core, archX86),
		dockerengine.PlatformString(osWindowsServer2019Full, archX86),
		dockerengine.PlatformString(osWindowsServer2022Core, archX86),
		dockerengine.PlatformString(osWindowsServer2022Full, archX86),
		dockerengine.PlatformString(osLinux, archX86),
		dockerengine.PlatformString(osLinux, archARM64)}

	// fmtTaskFamilyName formats the ECS task definition family name for a task group (see taskFamilyName).
	fmtTaskFamilyName = "copilot-%s"
)
// IsValidWindowsOS determines if the OS value is an accepted CFN Windows value.
func IsValidWindowsOS(os string) bool {
	for i := range validWindowsOSs {
		if validWindowsOSs[i] == os {
			return true
		}
	}
	return false
}
// taskFamilyName returns the ECS task definition family name for a task group.
func taskFamilyName(groupName string) string {
	family := fmt.Sprintf(fmtTaskFamilyName, groupName)
	return family
}
// newTaskFromECS converts an ECS task into copilot's Task representation.
func newTaskFromECS(ecsTask *ecs.Task) *Task {
	// Best-effort parse of the ENI: if no IP address can be found we simply
	// omit it from the output rather than returning an error.
	eni, _ := ecsTask.ENI()
	t := &Task{
		TaskARN:    aws.StringValue(ecsTask.TaskArn),
		ClusterARN: aws.StringValue(ecsTask.ClusterArn),
		StartedAt:  ecsTask.StartedAt,
		ENI:        eni,
	}
	return t
}
// convertECSTasks converts a slice of ECS tasks into copilot Task values.
// Tasks missing ENI information are still included so the rest of their data
// can be surfaced to the user.
func convertECSTasks(ecsTasks []*ecs.Task) []*Task {
	out := make([]*Task, 0, len(ecsTasks))
	for _, ecsTask := range ecsTasks {
		out = append(out, newTaskFromECS(ecsTask))
	}
	return out
}
| 120 |
copilot-cli | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: ./internal/pkg/task/task.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
ec2 "github.com/aws/copilot-cli/internal/pkg/aws/ec2"
ecs "github.com/aws/copilot-cli/internal/pkg/aws/ecs"
describe "github.com/aws/copilot-cli/internal/pkg/describe"
gomock "github.com/golang/mock/gomock"
)
// MockVPCGetter is a mock of VPCGetter interface.
// NOTE(review): this file is MockGen-generated ("DO NOT EDIT" header); change
// the source interface and regenerate with mockgen instead of hand-editing.
type MockVPCGetter struct {
	ctrl     *gomock.Controller
	recorder *MockVPCGetterMockRecorder
}

// MockVPCGetterMockRecorder is the mock recorder for MockVPCGetter.
type MockVPCGetterMockRecorder struct {
	mock *MockVPCGetter
}

// NewMockVPCGetter creates a new mock instance.
func NewMockVPCGetter(ctrl *gomock.Controller) *MockVPCGetter {
	mock := &MockVPCGetter{ctrl: ctrl}
	mock.recorder = &MockVPCGetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVPCGetter) EXPECT() *MockVPCGetterMockRecorder {
	return m.recorder
}

// SecurityGroups mocks base method.
func (m *MockVPCGetter) SecurityGroups(filters ...ec2.Filter) ([]string, error) {
	m.ctrl.T.Helper()
	// Variadic args are flattened into a []interface{} for gomock's Call.
	varargs := []interface{}{}
	for _, a := range filters {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "SecurityGroups", varargs...)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SecurityGroups indicates an expected call of SecurityGroups.
func (mr *MockVPCGetterMockRecorder) SecurityGroups(filters ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SecurityGroups", reflect.TypeOf((*MockVPCGetter)(nil).SecurityGroups), filters...)
}

// SubnetIDs mocks base method.
func (m *MockVPCGetter) SubnetIDs(filters ...ec2.Filter) ([]string, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{}
	for _, a := range filters {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "SubnetIDs", varargs...)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SubnetIDs indicates an expected call of SubnetIDs.
func (mr *MockVPCGetterMockRecorder) SubnetIDs(filters ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetIDs", reflect.TypeOf((*MockVPCGetter)(nil).SubnetIDs), filters...)
}
// MockClusterGetter is a mock of ClusterGetter interface.
// NOTE(review): MockGen-generated; regenerate rather than hand-edit.
type MockClusterGetter struct {
	ctrl     *gomock.Controller
	recorder *MockClusterGetterMockRecorder
}

// MockClusterGetterMockRecorder is the mock recorder for MockClusterGetter.
type MockClusterGetterMockRecorder struct {
	mock *MockClusterGetter
}

// NewMockClusterGetter creates a new mock instance.
func NewMockClusterGetter(ctrl *gomock.Controller) *MockClusterGetter {
	mock := &MockClusterGetter{ctrl: ctrl}
	mock.recorder = &MockClusterGetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClusterGetter) EXPECT() *MockClusterGetterMockRecorder {
	return m.recorder
}

// ClusterARN mocks base method.
func (m *MockClusterGetter) ClusterARN(app, env string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClusterARN", app, env)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClusterARN indicates an expected call of ClusterARN.
func (mr *MockClusterGetterMockRecorder) ClusterARN(app, env interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterARN", reflect.TypeOf((*MockClusterGetter)(nil).ClusterARN), app, env)
}
// MockDefaultClusterGetter is a mock of DefaultClusterGetter interface.
// NOTE(review): MockGen-generated; regenerate rather than hand-edit.
type MockDefaultClusterGetter struct {
	ctrl     *gomock.Controller
	recorder *MockDefaultClusterGetterMockRecorder
}

// MockDefaultClusterGetterMockRecorder is the mock recorder for MockDefaultClusterGetter.
type MockDefaultClusterGetterMockRecorder struct {
	mock *MockDefaultClusterGetter
}

// NewMockDefaultClusterGetter creates a new mock instance.
func NewMockDefaultClusterGetter(ctrl *gomock.Controller) *MockDefaultClusterGetter {
	mock := &MockDefaultClusterGetter{ctrl: ctrl}
	mock.recorder = &MockDefaultClusterGetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDefaultClusterGetter) EXPECT() *MockDefaultClusterGetterMockRecorder {
	return m.recorder
}

// DefaultCluster mocks base method.
func (m *MockDefaultClusterGetter) DefaultCluster() (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DefaultCluster")
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DefaultCluster indicates an expected call of DefaultCluster.
func (mr *MockDefaultClusterGetterMockRecorder) DefaultCluster() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultCluster", reflect.TypeOf((*MockDefaultClusterGetter)(nil).DefaultCluster))
}
// MockenvironmentDescriber is a mock of environmentDescriber interface.
// NOTE(review): MockGen-generated; regenerate rather than hand-edit.
type MockenvironmentDescriber struct {
	ctrl     *gomock.Controller
	recorder *MockenvironmentDescriberMockRecorder
}

// MockenvironmentDescriberMockRecorder is the mock recorder for MockenvironmentDescriber.
type MockenvironmentDescriberMockRecorder struct {
	mock *MockenvironmentDescriber
}

// NewMockenvironmentDescriber creates a new mock instance.
func NewMockenvironmentDescriber(ctrl *gomock.Controller) *MockenvironmentDescriber {
	mock := &MockenvironmentDescriber{ctrl: ctrl}
	mock.recorder = &MockenvironmentDescriberMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockenvironmentDescriber) EXPECT() *MockenvironmentDescriberMockRecorder {
	return m.recorder
}

// Describe mocks base method.
func (m *MockenvironmentDescriber) Describe() (*describe.EnvDescription, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Describe")
	ret0, _ := ret[0].(*describe.EnvDescription)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Describe indicates an expected call of Describe.
func (mr *MockenvironmentDescriberMockRecorder) Describe() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Describe", reflect.TypeOf((*MockenvironmentDescriber)(nil).Describe))
}
// MockNonZeroExitCodeGetter is a mock of NonZeroExitCodeGetter interface.
// NOTE(review): MockGen-generated; regenerate rather than hand-edit.
type MockNonZeroExitCodeGetter struct {
	ctrl     *gomock.Controller
	recorder *MockNonZeroExitCodeGetterMockRecorder
}

// MockNonZeroExitCodeGetterMockRecorder is the mock recorder for MockNonZeroExitCodeGetter.
type MockNonZeroExitCodeGetterMockRecorder struct {
	mock *MockNonZeroExitCodeGetter
}

// NewMockNonZeroExitCodeGetter creates a new mock instance.
func NewMockNonZeroExitCodeGetter(ctrl *gomock.Controller) *MockNonZeroExitCodeGetter {
	mock := &MockNonZeroExitCodeGetter{ctrl: ctrl}
	mock.recorder = &MockNonZeroExitCodeGetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockNonZeroExitCodeGetter) EXPECT() *MockNonZeroExitCodeGetterMockRecorder {
	return m.recorder
}

// HasNonZeroExitCode mocks base method.
func (m *MockNonZeroExitCodeGetter) HasNonZeroExitCode(arg0 []string, arg1 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HasNonZeroExitCode", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// HasNonZeroExitCode indicates an expected call of HasNonZeroExitCode.
func (mr *MockNonZeroExitCodeGetterMockRecorder) HasNonZeroExitCode(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasNonZeroExitCode", reflect.TypeOf((*MockNonZeroExitCodeGetter)(nil).HasNonZeroExitCode), arg0, arg1)
}
// MockRunner is a mock of Runner interface.
// NOTE(review): MockGen-generated; regenerate rather than hand-edit.
type MockRunner struct {
	ctrl     *gomock.Controller
	recorder *MockRunnerMockRecorder
}

// MockRunnerMockRecorder is the mock recorder for MockRunner.
type MockRunnerMockRecorder struct {
	mock *MockRunner
}

// NewMockRunner creates a new mock instance.
func NewMockRunner(ctrl *gomock.Controller) *MockRunner {
	mock := &MockRunner{ctrl: ctrl}
	mock.recorder = &MockRunnerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRunner) EXPECT() *MockRunnerMockRecorder {
	return m.recorder
}

// RunTask mocks base method.
func (m *MockRunner) RunTask(input ecs.RunTaskInput) ([]*ecs.Task, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RunTask", input)
	ret0, _ := ret[0].([]*ecs.Task)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// RunTask indicates an expected call of RunTask.
func (mr *MockRunnerMockRecorder) RunTask(input interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunTask", reflect.TypeOf((*MockRunner)(nil).RunTask), input)
}
| 265 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"bytes"
"fmt"
"strconv"
"text/template"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
)
// Paths of environment templates within the embedded templates filesystem.
const (
	envCFTemplatePath          = "environment/cf.yml"           // Top-level environment CloudFormation template.
	fmtEnvCFSubTemplatePath    = "environment/partials/%s.yml"  // Format string for partial template paths.
	envBootstrapCFTemplatePath = "environment/bootstrap-cf.yml" // Template that bootstraps IAM resources.
)

// Available env-controller managed feature names.
const (
	ALBFeatureName                     = "ALBWorkloads"
	EFSFeatureName                     = "EFSWorkloads"
	NATFeatureName                     = "NATWorkloads"
	InternalALBFeatureName             = "InternalALBWorkloads"
	AliasesFeatureName                 = "Aliases"
	AppRunnerPrivateServiceFeatureName = "AppRunnerPrivateWorkloads"
)

// LastForceDeployIDOutputName is the logical ID of the deployment controller output.
const LastForceDeployIDOutputName = "LastForceDeployID"
// friendlyEnvFeatureName maps env-controller managed parameter names to
// user-facing display names.
var friendlyEnvFeatureName = map[string]string{
	ALBFeatureName:                     "ALB",
	EFSFeatureName:                     "EFS",
	NATFeatureName:                     "NAT Gateway",
	InternalALBFeatureName:             "Internal ALB",
	AliasesFeatureName:                 "Aliases",
	AppRunnerPrivateServiceFeatureName: "App Runner Private Services",
}

// leastVersionForFeature maps each feature to the minimum environment
// template version that supports it.
var leastVersionForFeature = map[string]string{
	ALBFeatureName:                     "v1.0.0",
	EFSFeatureName:                     "v1.3.0",
	NATFeatureName:                     "v1.3.0",
	InternalALBFeatureName:             "v1.10.0",
	AliasesFeatureName:                 "v1.4.0",
	AppRunnerPrivateServiceFeatureName: "v1.23.0",
}
// AvailableEnvFeatures returns a list of the latest available feature, named after their corresponding parameter names.
func AvailableEnvFeatures() []string {
	features := []string{
		ALBFeatureName,
		EFSFeatureName,
		NATFeatureName,
		InternalALBFeatureName,
		AliasesFeatureName,
		AppRunnerPrivateServiceFeatureName,
	}
	return features
}
// FriendlyEnvFeatureName returns a user-friendly feature name given a env-controller managed parameter name.
// If there isn't one, it returns the parameter name that it is given.
func FriendlyEnvFeatureName(feature string) string {
	if friendly, ok := friendlyEnvFeatureName[feature]; ok {
		return friendly
	}
	return feature
}
// LeastVersionForFeature maps each feature to the least environment template version it requires.
// It returns the empty string for a feature that isn't env-controller managed.
func LeastVersionForFeature(feature string) string {
	return leastVersionForFeature[feature]
}
var (
	// Template names under "environment/partials/" attached to the full
	// environment template by ParseEnv.
	envCFSubTemplateNames = []string{
		"cdn-resources",
		"cfn-execution-role",
		"custom-resources",
		"custom-resources-role",
		"environment-manager-role",
		"lambdas",
		"vpc-resources",
		"nat-gateways",
		"bootstrap-resources",
		"elb-access-logs",
		"mappings-regional-configs",
		"ar-vpc-connector",
	}
)

var (
	// Subset of "environment/partials/" templates needed to bootstrap an
	// environment's IAM resources (see ParseEnvBootstrap).
	bootstrapEnvSubTemplateName = []string{
		"cfn-execution-role",
		"environment-manager-role",
		"bootstrap-resources",
	}
)
// Addons holds data about an aggregated addons stack.
type Addons struct {
	URL         string // Location of the addons stack template.
	ExtraParams string // Additional parameters forwarded to the addons stack.
}
// EnvOpts holds data that can be provided to enable features in an environment stack template.
type EnvOpts struct {
	AppName       string // The application name. Needed to create default value for svc discovery endpoint for upgraded environments.
	EnvName       string // Name of the environment being rendered.
	LatestVersion string // NOTE(review): presumably the latest env template version — confirm with callers.
	// Custom Resources backed by Lambda functions.
	CustomResources           map[string]S3ObjectLocation
	DNSDelegationLambda       string
	DNSCertValidatorLambda    string
	EnableLongARNFormatLambda string
	CustomDomainLambda        string
	Addons                    *Addons // Optional aggregated addons stack.
	ScriptBucketName          string
	PermissionsBoundary       string
	ArtifactBucketARN         string
	ArtifactBucketKeyARN      string
	VPCConfig                 VPCConfig
	PublicHTTPConfig          PublicHTTPConfig
	PrivateHTTPConfig         PrivateHTTPConfig
	Telemetry                 *Telemetry // Optional observability settings.
	CDNConfig                 *CDNConfig // Optional CloudFront configuration.
	SerializedManifest        string     // Serialized manifest used to render the environment template.
	ForceUpdateID             string
	DelegateDNS               bool
}
// PublicHTTPConfig represents configuration for a public facing Load Balancer.
type PublicHTTPConfig struct {
	HTTPConfig                    // Shared listener settings (SSL policy, imported certs).
	PublicALBSourceIPs []string   // Source IPs allowed to reach the public ALB.
	CIDRPrefixListIDs  []string   // Managed prefix list IDs allowed to reach the public ALB.
	ELBAccessLogs      *ELBAccessLogs // Optional access-log bucket settings; nil disables access logs.
}

// PrivateHTTPConfig represents configuration for an internal Load Balancer.
type PrivateHTTPConfig struct {
	HTTPConfig                 // Shared listener settings (SSL policy, imported certs).
	CustomALBSubnets []string  // Subnets to place the internal ALB in, overriding the defaults.
}
// HasImportedCerts returns true if any https certificates have been
// imported to the environment.
func (e *EnvOpts) HasImportedCerts() bool {
	if len(e.PublicHTTPConfig.ImportedCertARNs) > 0 {
		return true
	}
	if len(e.PrivateHTTPConfig.ImportedCertARNs) > 0 {
		return true
	}
	return e.CDNConfig != nil && e.CDNConfig.ImportedCertificate != nil
}
// HTTPConfig represents configuration for a Load Balancer.
type HTTPConfig struct {
	SSLPolicy        *string  // Optional SSL policy; nil uses the default.
	ImportedCertARNs []string // Certificate ARNs imported by the customer.
}

// ELBAccessLogs represents configuration for ELB access logs S3 bucket.
type ELBAccessLogs struct {
	BucketName string // Customer-provided bucket; empty means copilot manages one.
	Prefix     string // Key prefix for the access log objects.
}

// ShouldCreateBucket returns true if copilot should create bucket on behalf of customer.
func (elb *ELBAccessLogs) ShouldCreateBucket() bool {
	// A nil config means access logs are disabled; a non-empty name means the
	// customer brings their own bucket.
	return elb != nil && elb.BucketName == ""
}
// CDNConfig represents a Content Delivery Network deployed by CloudFront.
type CDNConfig struct {
	ImportedCertificate *string               // Optional imported ACM certificate for the distribution.
	TerminateTLS        bool                  // Whether TLS terminates at the CDN.
	Static              *CDNStaticAssetConfig // Optional static-assets configuration.
}

// CDNStaticAssetConfig represents static assets config for a Content Delivery Network.
type CDNStaticAssetConfig struct {
	Path           string // Path pattern served from the static bucket.
	ImportedBucket string // Customer-provided S3 bucket holding the assets.
	Alias          string // Domain alias for the static assets.
}

// VPCConfig represents the VPC configuration.
type VPCConfig struct {
	Imported            *ImportVPC // If not-nil, use the imported VPC resources instead of the Managed VPC.
	Managed             ManagedVPC
	AllowVPCIngress     bool
	SecurityGroupConfig *SecurityGroupConfig // Optional additional security group rules.
	FlowLogs            *VPCFlowLogs         // Optional VPC flow-log settings; nil disables flow logs.
}

// ImportVPC holds the fields to import VPC resources.
type ImportVPC struct {
	ID               string   // ID of the existing VPC.
	PublicSubnetIDs  []string
	PrivateSubnetIDs []string
}

// ManagedVPC holds the fields to configure a managed VPC.
type ManagedVPC struct {
	CIDR               string   // CIDR block for the VPC.
	AZs                []string // Availability zones to spread subnets across.
	PublicSubnetCIDRs  []string
	PrivateSubnetCIDRs []string
}

// Telemetry represents optional observability and monitoring configuration.
type Telemetry struct {
	EnableContainerInsights bool
}

// SecurityGroupConfig holds the fields to import security group config
type SecurityGroupConfig struct {
	Ingress []SecurityGroupRule
	Egress  []SecurityGroupRule
}

// SecurityGroupRule holds the fields to import security group rule
type SecurityGroupRule struct {
	CidrIP     string // CIDR range the rule applies to.
	FromPort   int
	IpProtocol string
	ToPort     int
}

// VPCFlowLogs holds the fields to configure logging IP traffic using VPC flow logs.
type VPCFlowLogs struct {
	Retention *int // Log retention in days; nil keeps the default.
}
// ParseEnv parses an environment's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseEnv(data *EnvOpts) (*Content, error) {
	// Parse the top-level template first, then attach every partial under
	// "environment/partials/" so {{template "<name>"}} references resolve.
	tpl, err := t.parse("base", envCFTemplatePath, withEnvParsingFuncs())
	if err != nil {
		return nil, err
	}
	for _, templateName := range envCFSubTemplateNames {
		nestedTpl, err := t.parse(templateName, fmt.Sprintf(fmtEnvCFSubTemplatePath, templateName), withEnvParsingFuncs())
		if err != nil {
			return nil, err
		}
		_, err = tpl.AddParseTree(templateName, nestedTpl.Tree)
		if err != nil {
			return nil, fmt.Errorf("add parse tree of %s to base template: %w", templateName, err)
		}
	}
	buf := &bytes.Buffer{}
	if err := tpl.Execute(buf, data); err != nil {
		return nil, fmt.Errorf("execute environment template with data %v: %w", data, err)
	}
	return &Content{buf}, nil
}
// ParseEnvBootstrap parses the CloudFormation template that bootstrap IAM resources with the specified data object and returns its content.
func (t *Template) ParseEnvBootstrap(data *EnvOpts, options ...ParseOption) (*Content, error) {
	// Same shape as ParseEnv but limited to the bootstrap template and the
	// partials it needs (see bootstrapEnvSubTemplateName).
	tpl, err := t.parse("base", envBootstrapCFTemplatePath, options...)
	if err != nil {
		return nil, err
	}
	for _, templateName := range bootstrapEnvSubTemplateName {
		nestedTpl, err := t.parse(templateName, fmt.Sprintf(fmtEnvCFSubTemplatePath, templateName), options...)
		if err != nil {
			return nil, err
		}
		_, err = tpl.AddParseTree(templateName, nestedTpl.Tree)
		if err != nil {
			return nil, fmt.Errorf("add parse tree of %s to base template: %w", templateName, err)
		}
	}
	buf := &bytes.Buffer{}
	if err := tpl.Execute(buf, data); err != nil {
		return nil, fmt.Errorf("execute environment template with data %v: %w", data, err)
	}
	return &Content{buf}, nil
}
// withEnvParsingFuncs returns a ParseOption that registers the helper
// functions available to environment templates.
func withEnvParsingFuncs() ParseOption {
	return func(t *template.Template) *template.Template {
		return t.Funcs(map[string]interface{}{
			"inc":               IncFunc,             // increment an integer.
			"fmtSlice":          FmtSliceFunc,        // format a slice for CFN.
			"quote":             strconv.Quote,       // double-quote a string.
			"truncate":          truncate,            // cap a string at N bytes.
			"bucketNameFromURL": bucketNameFromURL,   // extract bucket name from an S3 URL.
			"logicalIDSafe":     StripNonAlphaNumFunc, // strip chars invalid in CFN logical IDs.
		})
	}
}
// truncate returns s capped at maxLen bytes.
// A non-positive maxLen yields the empty string instead of panicking on
// s[:maxLen] with a negative index (the original behavior for maxLen < 0).
// NOTE(review): truncation is byte-based, so a multi-byte UTF-8 rune can be
// split; acceptable for the ASCII identifiers templates feed it — confirm
// before reusing elsewhere.
func truncate(s string, maxLen int) string {
	if maxLen <= 0 {
		return ""
	}
	if len(s) <= maxLen {
		return s
	}
	return s[:maxLen]
}
// bucketNameFromURL extracts the S3 bucket name from url.
// The parse error is deliberately ignored; on failure the returned name is
// whatever zero value s3.ParseURL yields — presumably the empty string;
// verify against s3.ParseURL before relying on it.
func bucketNameFromURL(url string) string {
	bucketName, _, _ := s3.ParseURL(url)
	return bucketName
}
| 310 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"fmt"
"regexp"
"testing"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// featureRegexp captures ${Param} interpolations in the rendered
// EnabledFeatures output. E.g. match ${ALB} and ${EFS}.
var featureRegexp = regexp.MustCompile(`\$\{(\w+)}`)

// TestEnv_AvailableEnvFeatures verifies that every env-controller managed
// parameter referenced by the template's EnabledFeatures output is registered
// in AvailableEnvFeatures, friendlyEnvFeatureName and leastVersionForFeature.
func TestEnv_AvailableEnvFeatures(t *testing.T) {
	c, err := New().ParseEnv(&EnvOpts{})
	require.NoError(t, err)
	// Only the Outputs section of the rendered template is inspected.
	tmpl := struct {
		Outputs map[string]interface{} `yaml:"Outputs"`
	}{}
	b, err := c.MarshalBinary()
	require.NoError(t, err)
	err = yaml.Unmarshal(b, &tmpl)
	require.NoError(t, err)
	enabledFeaturesOutput := tmpl.Outputs["EnabledFeatures"].(map[string]interface{})
	enabledFeatures := enabledFeaturesOutput["Value"].(string)
	var exists struct{}
	featuresSet := make(map[string]struct{})
	for _, f := range AvailableEnvFeatures() {
		featuresSet[f] = exists
	}
	// Every ${Param} in the output must be known to all three registries.
	for _, match := range featureRegexp.FindAllStringSubmatch(enabledFeatures, -1) {
		paramName := match[1]
		_, ok := featuresSet[paramName]
		require.True(t, ok, fmt.Sprintf("env-controller managed feature %s should be added as an available feature", paramName))
		_, ok = friendlyEnvFeatureName[paramName]
		require.True(t, ok, fmt.Sprintf("env-controller managed feature %s should have a friendly feature name", paramName))
		_, ok = leastVersionForFeature[paramName]
		require.True(t, ok, fmt.Sprintf("should specify a least-required environment template version for the env-controller managed feature %s", paramName))
	}
}
| 50 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
)
// TestTemplate_ParseEnv checks that ParseEnv succeeds when the base template
// and every partial listed in envCFSubTemplateNames exist on the filesystem.
func TestTemplate_ParseEnv(t *testing.T) {
	// GIVEN: a fake FS containing cf.yml plus one file per required partial.
	fs := afero.NewMemMapFs()
	_ = fs.MkdirAll("templates/environment", 0755)
	_ = afero.WriteFile(fs, "templates/environment/cf.yml", []byte("test"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/cdn-resources.yml", []byte("cdn-resources"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/cfn-execution-role.yml", []byte("cfn-execution-role"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/custom-resources.yml", []byte("custom-resources"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/custom-resources-role.yml", []byte("custom-resources-role"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/environment-manager-role.yml", []byte("environment-manager-role"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/lambdas.yml", []byte("lambdas"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/vpc-resources.yml", []byte("vpc-resources"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/nat-gateways.yml", []byte("nat-gateways"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/bootstrap-resources.yml", []byte("bootstrap"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/elb-access-logs.yml", []byte("elb-access-logs"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/mappings-regional-configs.yml", []byte("mappings-regional-configs"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/ar-vpc-connector.yml", []byte("ar-vpc-connector"), 0644)
	tpl := &Template{
		fs: &mockFS{
			Fs: fs,
		},
	}
	// WHEN
	c, err := tpl.ParseEnv(&EnvOpts{})
	// THEN: only the base template's content is rendered since it references no partials.
	require.NoError(t, err)
	require.Equal(t, "test", c.String())
}
// TestTemplate_ParseEnvBootstrap checks that ParseEnvBootstrap succeeds when
// the bootstrap template and its three required partials exist.
func TestTemplate_ParseEnvBootstrap(t *testing.T) {
	// GIVEN: a fake FS with bootstrap-cf.yml and the bootstrap partials.
	fs := afero.NewMemMapFs()
	_ = fs.MkdirAll("templates/environment/partials", 0755)
	_ = afero.WriteFile(fs, "templates/environment/bootstrap-cf.yml", []byte("test"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/cfn-execution-role.yml", []byte("cfn-execution-role"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/environment-manager-role.yml", []byte("environment-manager-role"), 0644)
	_ = afero.WriteFile(fs, "templates/environment/partials/bootstrap-resources.yml", []byte("bootstrap"), 0644)
	tpl := &Template{
		fs: &mockFS{
			Fs: fs,
		},
	}
	// WHEN
	c, err := tpl.ParseEnvBootstrap(&EnvOpts{})
	// THEN
	require.NoError(t, err)
	require.Equal(t, "test", c.String())
}
// TestTruncate covers the empty-string and the below/at/above maxLen
// boundaries of truncate.
func TestTruncate(t *testing.T) {
	tests := map[string]struct {
		s        string
		maxLen   int
		expected string
	}{
		"empty string": {
			s:        "",
			maxLen:   10,
			expected: "",
		},
		"maxLen < len(string)": {
			s:        "qwerty",
			maxLen:   4,
			expected: "qwer",
		},
		"maxLen > len(string)": {
			s:        "qwerty",
			maxLen:   7,
			expected: "qwerty",
		},
		"maxLen == len(string)": {
			s:        "qwerty",
			maxLen:   6,
			expected: "qwerty",
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, truncate(tc.s, tc.maxLen))
		})
	}
}
| 102 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"fmt"
"sort"
"strings"
"unicode"
)
const (
	cdkVersion              = "2.56.0"             // AWS CDK version pinned into generated package.json files.
	cdkConstructsMinVersion = "10.0.0"             // Minimum "constructs" library version for the CDK templates.
	cdkTemplatesPath        = "overrides/cdk"      // Root of the CDK override templates.
	yamlPatchTemplatesPath  = "overrides/yamlpatch" // Root of the YAML-patch override templates.
)
var (
	// cdkAliasForService maps an AWS service name (the middle segment of a
	// CFN type, e.g. "DynamoDB" in "AWS::DynamoDB::Table") to a short import
	// alias. Services absent from this map fall back to the lowercased
	// service name (see CFNType.ImportShortRename).
	cdkAliasForService = map[string]string{
		"ApiGatewayV2":           "apigwv2",
		"AppRunner":              "ar",
		"AutoScalingPlans":       "asgplans",
		"ApplicationAutoScaling": "appasg",
		"AutoScaling":            "asg",
		"CertificateManager":     "acm",
		"CloudFormation":         "cfn",
		"CloudFront":             "cf",
		"ServiceDiscovery":       "sd",
		"CloudWatch":             "cw",
		"CodeBuild":              "cb",
		"CodePipeline":           "cp",
		"DynamoDB":               "ddb",
		"ElasticLoadBalancingV2": "elbv2",
		"OpenSearchService":      "oss",
		"Route53":                "r53",
		"StepFunctions":          "sfn",
	}
)
// CFNResource represents a resource rendered in a CloudFormation template.
type CFNResource struct {
	Type      CFNType // e.g. "AWS::ECS::Service".
	LogicalID string  // Logical ID of the resource within the template.
}

// CDKImport is the interface to import a CDK package.
type CDKImport interface {
	// ImportName returns the CDK package name, e.g. "aws_ecs".
	ImportName() string
	// ImportShortRename returns the alias the package is imported under.
	ImportShortRename() string
}
// cfnResources is a list of CloudFormation resources that can derive the CDK
// imports needed to manipulate them.
type cfnResources []CFNResource

// Imports returns a list of CDK imports for a given list of CloudFormation resources.
func (rs cfnResources) Imports() []CDKImport {
	// Retain one CFN type per AWS service: e.g. for "AWS::ECS::Service" and
	// "AWS::ECS::TaskDef" only the first-seen type is kept, since both map to
	// the same CDK package import.
	seen := make(map[string]CFNType, len(rs))
	for _, r := range rs {
		serviceName := strings.Split(strings.ToLower(string(r.Type)), "::")[1]
		if _, ok := seen[serviceName]; ok {
			continue
		}
		seen[serviceName] = r.Type
	}
	// Collect with append instead of manual index bookkeeping.
	imports := make([]CDKImport, 0, len(seen))
	for _, resourceType := range seen {
		imports = append(imports, resourceType)
	}
	// Map iteration order is random; sort so the output is deterministic for unit tests.
	sort.Slice(imports, func(i, j int) bool {
		return imports[i].ImportShortRename() < imports[j].ImportShortRename()
	})
	return imports
}
// CFNType is a CloudFormation resource type such as "AWS::ECS::Service".
type CFNType string

// ImportName returns the name of the CDK package for the given CloudFormation type.
func (t CFNType) ImportName() string {
	// The package name is "aws_" + the lowercased service segment, e.g.
	// "AWS::ECS::Service" -> "aws_ecs".
	service := strings.Split(strings.ToLower(string(t)), "::")[1]
	return "aws_" + service
}
// ImportShortRename returns a human-friendly shortened rename of the CDK package for the given CloudFormation type.
func (t CFNType) ImportShortRename() string {
	service := strings.Split(string(t), "::")[1]
	// Prefer the curated alias; otherwise fall back to the lowercased service name.
	rename, ok := cdkAliasForService[service]
	if !ok {
		rename = strings.ToLower(service)
	}
	return rename
}
// L1ConstructName returns the name of the L1 construct representing the CloudFormation type.
func (t CFNType) L1ConstructName() string {
	// e.g. "AWS::ECS::Service" -> "CfnService".
	resource := strings.Split(string(t), "::")[2]
	return "Cfn" + resource
}
// WalkOverridesCDKDir walks through the overrides/cdk templates and calls fn for each parsed template file.
func (t *Template) WalkOverridesCDKDir(resources []CFNResource, fn WalkDirFunc) error {
	// metadata is the data object rendered into every template under overrides/cdk.
	type metadata struct {
		Version           string       // Pinned aws-cdk version.
		ConstructsVersion string       // Minimum "constructs" library version.
		Resources         cfnResources // Resources the override stack manipulates.
	}
	return t.walkDir(cdkTemplatesPath, cdkTemplatesPath, metadata{
		Version:           cdkVersion,
		ConstructsVersion: cdkConstructsMinVersion,
		Resources:         resources,
	}, fn, WithFuncs(
		map[string]interface{}{
			// transform all the initial capital letters into lower letters.
			// Every rune before the first lowercase rune is lowercased, e.g.
			// "HTTPListenerRule" -> "httplistenerRule"; an all-caps input is
			// lowercased entirely.
			"lowerInitialLetters": func(serviceName string) string {
				firstSmall := len(serviceName)
				for i, r := range serviceName {
					if unicode.IsLower(r) {
						firstSmall = i
						break
					}
				}
				return strings.ToLower(serviceName[:firstSmall]) + serviceName[firstSmall:]
			},
		},
	))
}
// WalkOverridesPatchDir walks through the overrides/yamlpatch templates and calls fn for each parsed template file.
// Unlike the CDK walk, these templates take no data object.
func (t *Template) WalkOverridesPatchDir(fn WalkDirFunc) error {
	return t.walkDir(yamlPatchTemplatesPath, yamlPatchTemplatesPath, struct{}{}, fn)
}
| 140 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
)
// TestCFNType_ImportName checks the CDK package name derivation.
func TestCFNType_ImportName(t *testing.T) {
	require.Equal(t, "aws_autoscaling", CFNType("AWS::AutoScaling::AutoScalingGroup").ImportName())
}

// TestCFNType_ImportShortRename covers both curated aliases and the
// lowercased fallback for services without one.
func TestCFNType_ImportShortRename(t *testing.T) {
	testCases := []struct {
		in     string
		wanted string
	}{
		{
			in:     "AWS::AutoScaling::AutoScalingGroup",
			wanted: "asg",
		},
		{
			in:     "AWS::Logs::LogGroup",
			wanted: "logs",
		},
		{
			in:     "AWS::ECS::Service",
			wanted: "ecs",
		},
		{
			in:     "AWS::DynamoDB::Table",
			wanted: "ddb",
		},
		{
			in:     "AWS::ApiGatewayV2::Api",
			wanted: "apigwv2",
		},
		{
			in:     "AWS::EC2::CapacityReservation",
			wanted: "ec2",
		},
		{
			in:     "AWS::ElasticLoadBalancingV2::Listener",
			wanted: "elbv2",
		},
	}
	for _, tc := range testCases {
		require.Equal(t, tc.wanted, CFNType(tc.in).ImportShortRename(), "unexpected short name for %q", tc.in)
	}
}

// TestCFNType_L1ConstructName checks the Cfn-prefixed L1 construct name.
func TestCFNType_L1ConstructName(t *testing.T) {
	require.Equal(t, "CfnAutoScalingGroup", CFNType("AWS::AutoScaling::AutoScalingGroup").L1ConstructName())
}
// cdkImportTestDouble is a configurable CDKImport stub for assertions.
type cdkImportTestDouble struct {
	importNameFn      func() string
	importShortRename func() string
}

// Assert that cdkImportTestDouble implements the CDKImport interface.
var _ CDKImport = (*cdkImportTestDouble)(nil)

func (td *cdkImportTestDouble) ImportName() string {
	return td.importNameFn()
}

func (td *cdkImportTestDouble) ImportShortRename() string {
	return td.importShortRename()
}
// TestCfnResources_Imports verifies that duplicate services are collapsed to
// a single import and that the result is sorted by short rename.
func TestCfnResources_Imports(t *testing.T) {
	// GIVEN: duplicates per service (two IAM roles, two ECR repos, two ECS types).
	resources := cfnResources([]CFNResource{
		{
			Type: "AWS::IAM::Role",
		},
		{
			Type: "AWS::ECS::Cluster",
		},
		{
			Type: "AWS::IAM::Role",
		},
		{
			Type: "AWS::ECR::Repository",
		},
		{
			Type: "AWS::ECR::Repository",
		},
		{
			Type: "AWS::ECS::Service",
		},
	})
	wanted := []CDKImport{
		&cdkImportTestDouble{
			importNameFn: func() string {
				return "aws_ecr"
			},
			importShortRename: func() string {
				return "ecr"
			},
		},
		&cdkImportTestDouble{
			importNameFn: func() string {
				return "aws_ecs"
			},
			importShortRename: func() string {
				return "ecs"
			},
		},
		&cdkImportTestDouble{
			importNameFn: func() string {
				return "aws_iam"
			},
			importShortRename: func() string {
				return "iam"
			},
		},
	}
	// WHEN
	imports := resources.Imports()
	// THEN: one import per service, in short-rename order.
	require.Equal(t, len(imports), len(wanted), "expected number of imports to be equal")
	for i, lib := range imports {
		require.Equal(t, wanted[i].ImportName(), lib.ImportName(), "expected import names to match")
		require.Equal(t, wanted[i].ImportShortRename(), lib.ImportShortRename(), "expected import short renames to match")
	}
}
// TestTemplate_WalkOverridesCDKDir renders every file under
// "templates/overrides/cdk" against the given CFN resources and asserts that
// the walk callback sees each file exactly once with the rendered content.
func TestTemplate_WalkOverridesCDKDir(t *testing.T) {
	// GIVEN
	// An in-memory file system standing in for the embedded templates dir.
	fs := afero.NewMemMapFs()
	_ = fs.MkdirAll("templates/overrides/cdk/bin", 0755)
	// bin/app.js contains no template actions; it should pass through as-is.
	_ = afero.WriteFile(fs, "templates/overrides/cdk/bin/app.js", []byte(`const app = new cdk.App();`), 0644)
	// package.json is a template: the CDK and constructs versions get injected.
	_ = afero.WriteFile(fs, "templates/overrides/cdk/package.json", []byte(`{
"devDependencies": {
"aws-cdk": "{{.Version}}",
"ts-node": "^10.9.1",
"typescript": "~4.9.4"
},
"dependencies": {
"aws-cdk-lib": "{{.Version}}",
"constructs": "^{{.ConstructsVersion}}",
"source-map-support": "^0.5.21"
}
}`), 0644)
	// stack.ts is a template over the resources: one import line per unique
	// CDK module, and one transform method per resource.
	_ = afero.WriteFile(fs, "templates/overrides/cdk/stack.ts", []byte(`{{- range $import := .Resources.Imports }}
import { {{$import.ImportName}} as {{$import.ImportShortRename}} } from 'aws-cdk-lib';
{{- end }}
{{range $resource := .Resources}}
transform{{$resource.LogicalID}}() {
const {{lowerInitialLetters $resource.LogicalID}} = this.template.getResource("{{$resource.LogicalID}}") as {{$resource.Type.ImportShortRename}}.{{$resource.Type.L1ConstructName}};
}
{{end}}
`), 0644)
	tpl := &Template{
		fs: &mockFS{Fs: fs},
	}
	input := []CFNResource{
		{
			Type:      "AWS::ECS::Service",
			LogicalID: "Service",
		},
		{
			Type:      "AWS::ElasticLoadBalancingV2::ListenerRule",
			LogicalID: "HTTPListenerRuleWithDomain",
		},
		{
			Type:      "AWS::ElasticLoadBalancingV2::ListenerRule",
			LogicalID: "HTTPListenerRule",
		},
	}

	// WHEN
	// Track which files the callback visited so none are silently skipped.
	walked := map[string]bool{
		"package.json": false,
		"stack.ts":     false,
		"bin/app.js":   false,
	}
	err := tpl.WalkOverridesCDKDir(input, func(name string, content *Content) error {
		switch name {
		case "package.json":
			walked["package.json"] = true
			require.Equal(t, `{
"devDependencies": {
"aws-cdk": "2.56.0",
"ts-node": "^10.9.1",
"typescript": "~4.9.4"
},
"dependencies": {
"aws-cdk-lib": "2.56.0",
"constructs": "^10.0.0",
"source-map-support": "^0.5.21"
}
}`, content.String())
		case "stack.ts":
			walked["stack.ts"] = true
			require.Equal(t, `
import { aws_ecs as ecs } from 'aws-cdk-lib';
import { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';

transformService() {
const service = this.template.getResource("Service") as ecs.CfnService;
}

transformHTTPListenerRuleWithDomain() {
const httplistenerRuleWithDomain = this.template.getResource("HTTPListenerRuleWithDomain") as elbv2.CfnListenerRule;
}

transformHTTPListenerRule() {
const httplistenerRule = this.template.getResource("HTTPListenerRule") as elbv2.CfnListenerRule;
}

`, content.String())
		case "bin/app.js":
			walked["bin/app.js"] = true
			require.Equal(t, "const app = new cdk.App();", content.String())
		}
		return nil
	})

	// THEN
	require.NoError(t, err)
	for name, ok := range walked {
		if !ok {
			require.FailNowf(t, "missing walk file", "file %q was not walked", name)
		}
	}
}
| 240 |
copilot-cli | aws | Go | //go:build integration || localintegration
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"bytes"
"github.com/stretchr/testify/require"
"io/fs"
"os"
"path/filepath"
"testing"
)
// TestPermissions_Boundary scans every file under "templates" and asserts
// that the number of "AWS::IAM::Role" occurrences matches the number of
// "PermissionsBoundary:" fields, i.e. every IAM role declares a (conditional)
// permissions boundary.
func TestPermissions_Boundary(t *testing.T) {
	t.Run("every CloudFormation template must contain conditional permissions boundary field for all IAM roles", func(t *testing.T) {
		err := filepath.WalkDir("templates", func(path string, di fs.DirEntry, err error) error {
			// Per fs.WalkDirFunc's contract, di can be nil when err is
			// non-nil; surface the error instead of dereferencing di.
			if err != nil {
				return err
			}
			if di.IsDir() {
				return nil
			}
			contents, err := os.ReadFile(path)
			require.NoError(t, err, "read file at %s", path)
			roleCount := bytes.Count(contents, []byte("AWS::IAM::Role"))
			pbFieldCount := bytes.Count(contents, []byte("PermissionsBoundary:"))
			require.Equal(t, roleCount, pbFieldCount, "number of IAM roles does not equal number of permissions boundary fields in file '%s'", path)
			return nil
		})
		require.NoError(t, err, "should walk templates dir for template files")
	})
}
| 33 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package template renders the static files under the "/templates/" directory.
package template
import (
"bytes"
"embed"
"fmt"
"io/fs"
"path"
"path/filepath"
"strings"
"text/template"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/aws/copilot-cli/internal/pkg/template/artifactpath"
)
// templateFS embeds every file under "templates/"; the dotfile must be listed
// explicitly because go:embed skips files starting with "." by default.
//go:embed templates templates/overrides/cdk/.gitignore
var templateFS embed.FS

// File names under "templates/".
const (
	DNSCertValidatorFileName            = "dns-cert-validator"
	CertReplicatorFileName              = "cert-replicator"
	DNSDelegationFileName               = "dns-delegation"
	CustomDomainFileName                = "custom-domain"
	AppRunnerCustomDomainLambdaFileName = "custom-domain-app-runner"

	// customResourceRootPath is the dir holding custom-resource JS sources.
	customResourceRootPath = "custom-resources"
	// customResourceZippedScriptName is the entry-point name inside each zip.
	customResourceZippedScriptName = "index.js"
	// scriptDirName is the S3 key prefix for uploaded scripts.
	scriptDirName = "scripts"
)

// AddonsStackLogicalID is the logical ID for the addon stack resource in the main template.
const AddonsStackLogicalID = "AddonsStack"

// Groups of files that belong to the same stack.
var (
	// envCustomResourceFiles are the custom-resource scripts uploaded for an
	// environment stack.
	envCustomResourceFiles = []string{
		DNSCertValidatorFileName,
		CertReplicatorFileName,
		DNSDelegationFileName,
		CustomDomainFileName,
	}
)
// Reader is the interface that wraps the Read method.
// Read returns the raw contents of the template at the given path.
type Reader interface {
	Read(path string) (*Content, error)
}

// Parser is the interface that wraps the Parse method.
// Parse renders the template at path with the given data object.
type Parser interface {
	Parse(path string, data interface{}, options ...ParseOption) (*Content, error)
}

// ReadParser is the interface that wraps the Read and Parse methods.
type ReadParser interface {
	Reader
	Parser
}
// Uploadable is an uploadable file.
type Uploadable struct {
	name    string // File name to upload as.
	content []byte // Raw file contents.
	path    string // Source path of the file under "templates/".
}

// Name returns the name of the custom resource script.
func (u Uploadable) Name() string {
	return u.name
}

// Content returns the content of the custom resource script.
func (u Uploadable) Content() []byte {
	return u.content
}
// fileToCompress groups one or more uploadables into a single named archive.
type fileToCompress struct {
	name        string       // Destination name (S3 key suffix) of the archive.
	uploadables []Uploadable // Files to include in the archive.
}

// osFS is the subset of file-system behavior Template needs: directory
// listing and whole-file reads. Satisfied by embed.FS and by test fakes.
type osFS interface {
	fs.ReadDirFS
	fs.ReadFileFS
}
// Template represents the "/templates/" directory that holds static files to be embedded in the binary.
type Template struct {
	fs osFS // Backing file system; the embedded FS in production, a fake in tests.
}

// New returns a Template object that can be used to parse files under the "/templates/" directory.
func New() *Template {
	return &Template{
		fs: templateFS,
	}
}
// Read returns the contents of the template under "/templates/{path}".
func (t *Template) Read(path string) (*Content, error) {
	raw, err := t.read(path)
	if err != nil {
		return nil, err
	}
	content := &Content{
		Buffer: bytes.NewBufferString(raw),
	}
	return content, nil
}
// Parse parses the template under "/templates/{path}" with the specified data object and returns its content.
// The options can add custom functions to the template before execution.
func (t *Template) Parse(path string, data interface{}, options ...ParseOption) (*Content, error) {
	tpl, err := t.parse("template", path, options...)
	if err != nil {
		return nil, err
	}
	buf := new(bytes.Buffer)
	if err := tpl.Execute(buf, data); err != nil {
		return nil, fmt.Errorf("execute template %s: %w", path, err)
	}
	return &Content{buf}, nil
}
// UploadEnvironmentCustomResources uploads the environment custom resource scripts.
// It returns a map from script name to its uploaded S3 URL.
func (t *Template) UploadEnvironmentCustomResources(upload s3.CompressAndUploadFunc) (map[string]string, error) {
	return t.uploadCustomResources(upload, envCustomResourceFiles)
}

// uploadCustomResources zips and uploads each named custom-resource script
// (read from "custom-resources/<name>.js", stored in the archive as
// "index.js") and returns a map from script name to uploaded URL.
func (t *Template) uploadCustomResources(upload s3.CompressAndUploadFunc, fileNames []string) (map[string]string, error) {
	urls := make(map[string]string)
	for _, name := range fileNames {
		url, err := t.uploadFileToCompress(upload, fileToCompress{
			name: path.Join(scriptDirName, name),
			uploadables: []Uploadable{
				{
					name: customResourceZippedScriptName,
					path: path.Join(customResourceRootPath, fmt.Sprintf("%s.js", name)),
				},
			},
		})
		if err != nil {
			return nil, err
		}
		urls[name] = url
	}
	return urls, nil
}

// uploadFileToCompress reads each uploadable's source template, compresses
// them into one archive via the upload callback, and returns the object URL.
func (t *Template) uploadFileToCompress(upload s3.CompressAndUploadFunc, file fileToCompress) (string, error) {
	var contents []byte
	var nameBinaries []s3.NamedBinary
	for _, uploadable := range file.uploadables {
		content, err := t.Read(uploadable.path)
		if err != nil {
			return "", err
		}
		uploadable.content = content.Bytes()
		// contents accumulates every file's bytes solely to compute the
		// checksum used in the S3 key below.
		contents = append(contents, uploadable.content...)
		nameBinaries = append(nameBinaries, uploadable)
	}
	// Prefix with a SHA256 checksum of the fileToCompress so that
	// only new content gets a new URL. Otherwise, if two fileToCompress have the
	// same content then the URL generated will be identical.
	url, err := upload(artifactpath.MkdirSHA256(file.name, contents), nameBinaries...)
	if err != nil {
		return "", fmt.Errorf("upload %s: %w", file.name, err)
	}
	return url, nil
}
// ParseOption represents a functional option for the Parse method.
// Each option receives the template being built and returns the modified one.
type ParseOption func(t *template.Template) *template.Template

// WithFuncs returns a template that can parse additional custom functions.
func WithFuncs(fns map[string]interface{}) ParseOption {
	return func(t *template.Template) *template.Template {
		return t.Funcs(fns)
	}
}

// Content represents the parsed template.
type Content struct {
	*bytes.Buffer
}

// MarshalBinary returns the contents as binary and implements the encoding.BinaryMarshaler interface.
func (c *Content) MarshalBinary() ([]byte, error) {
	return c.Bytes(), nil
}
// newTextTemplate returns a named text/template with the "indent" and "include" functions.
// "include" renders a named sub-template to a string (so its output can be
// piped to other functions); "indent" prefixes every line with n spaces.
// Both closures capture t, so templates parsed into t later are visible.
func newTextTemplate(name string) *template.Template {
	t := template.New(name)
	t.Funcs(map[string]interface{}{
		"include": func(name string, data interface{}) (string, error) {
			// Taken from https://github.com/helm/helm/blob/8648ccf5d35d682dcd5f7a9c2082f0aaf071e817/pkg/engine/engine.go#L147-L154
			buf := bytes.NewBuffer(nil)
			if err := t.ExecuteTemplate(buf, name, data); err != nil {
				return "", err
			}
			return buf.String(), nil
		},
		"indent": func(spaces int, s string) string {
			// Taken from https://github.com/Masterminds/sprig/blob/48e6b77026913419ba1a4694dde186dc9c4ad74d/strings.go#L109-L112
			pad := strings.Repeat(" ", spaces)
			return pad + strings.Replace(s, "\n", "\n"+pad, -1)
		},
	})
	return t
}
// read returns the raw contents of "templates/{path}" from the backing fs.
// The parameter shadows the "path" package, hence filepath.Join+ToSlash.
func (t *Template) read(path string) (string, error) {
	dat, err := t.fs.ReadFile(filepath.ToSlash(filepath.Join("templates", path))) // We need to use "/" even on Windows with go:embed.
	if err != nil {
		return "", fmt.Errorf("read template %s: %w", path, err)
	}
	return string(dat), nil
}
// parse reads the file at path and returns a parsed text/template object with the given name.
// Any ParseOptions are applied (e.g. extra template functions) before parsing.
func (t *Template) parse(name, path string, options ...ParseOption) (*template.Template, error) {
	content, err := t.read(path)
	if err != nil {
		return nil, err
	}
	tpl := newTextTemplate(name)
	for _, apply := range options {
		tpl = apply(tpl)
	}
	parsed, err := tpl.Parse(content)
	if err != nil {
		return nil, fmt.Errorf("parse template %s: %w", path, err)
	}
	return parsed, nil
}
// WalkDirFunc is the type of the function called by any Walk functions while visiting each file under a directory.
type WalkDirFunc func(name string, content *Content) error

// walkDir recursively walks "templates/{curPath}", parses each file with the
// given data and parse options, and calls fn with the file's path relative to
// basePath and its rendered content. It stops at the first error.
func (t *Template) walkDir(basePath, curPath string, data any, fn WalkDirFunc, parseOpts ...ParseOption) error {
	entries, err := t.fs.ReadDir(path.Join("templates", curPath))
	if err != nil {
		return fmt.Errorf("read dir %q: %w", curPath, err)
	}
	for _, entry := range entries {
		targetPath := path.Join(curPath, entry.Name())
		if entry.IsDir() {
			// Forward parseOpts so files in nested directories are parsed
			// with the same options as top-level ones; previously the
			// options were silently dropped on recursion.
			if err := t.walkDir(basePath, targetPath, data, fn, parseOpts...); err != nil {
				return err
			}
			continue
		}
		content, err := t.Parse(targetPath, data, parseOpts...)
		if err != nil {
			return err
		}
		relPath, err := filepath.Rel(basePath, targetPath)
		if err != nil {
			return err
		}
		if err := fn(relPath, content); err != nil {
			return err
		}
	}
	return nil
}
| 273 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws"
)
const (
	// dashReplacement is the token substituted for "-" in logical IDs.
	dashReplacement = "DASH"
)

// ReplaceDashesFunc takes a CloudFormation logical ID, and sanitizes it by
// removing "-" characters (not allowed) and replacing them with "DASH"
// (allowed by CloudFormation but not permitted in ecs-cli generated
// resource names).
func ReplaceDashesFunc(logicalID string) string {
	return strings.Join(strings.Split(logicalID, "-"), dashReplacement)
}
// IsARNFunc takes a string value and determines if it's an ARN or not.
// It delegates to the AWS SDK's arn.IsARN check.
func IsARNFunc(value string) bool {
	return arn.IsARN(value)
}
// TrimSlashPrefix takes a string value and removes slash prefix from the string if present.
// Only a single leading "/" is removed.
func TrimSlashPrefix(value string) string {
	if strings.HasPrefix(value, "/") {
		return value[len("/"):]
	}
	return value
}
// DashReplacedLogicalIDToOriginal takes a "sanitized" logical ID
// and converts it back to its original form, with dashes.
// It is the inverse of ReplaceDashesFunc (assuming the original name never
// contained the literal "DASH" token).
func DashReplacedLogicalIDToOriginal(safeLogicalID string) string {
	return strings.ReplaceAll(safeLogicalID, dashReplacement, "-")
}
// nonAlphaNum matches every run of characters outside [a-zA-Z0-9].
var nonAlphaNum = regexp.MustCompile("[^a-zA-Z0-9]+")

// StripNonAlphaNumFunc strips non-alphanumeric characters from an input string.
func StripNonAlphaNumFunc(s string) string {
	// The replacement has no $-group references, so ReplaceAllLiteralString
	// is equivalent to ReplaceAllString here.
	return nonAlphaNum.ReplaceAllLiteralString(s, "")
}
// EnvVarNameFunc converts an input resource name to LogicalIDSafe, then appends
// "Name" to the end.
func EnvVarNameFunc(s string) string {
	return StripNonAlphaNumFunc(s) + "Name"
}

// HasCustomIngress returns true if there is any ingress specified by the customer.
// Ingress here means source IPs or CIDR prefix list IDs on the public ALB.
func (cfg *PublicHTTPConfig) HasCustomIngress() bool {
	return len(cfg.PublicALBSourceIPs) > 0 || len(cfg.CIDRPrefixListIDs) > 0
}

// IsFIFO checks if the given queue has FIFO config.
func (s SQSQueue) IsFIFO() bool {
	return s.FIFOQueueConfig != nil
}

// EnvVarSecretFunc converts an input resource name to LogicalIDSafe, then appends
// "Secret" to the end.
func EnvVarSecretFunc(s string) string {
	return StripNonAlphaNumFunc(s) + "Secret"
}
// lowerUpperRegexp grabs word boundaries in default CamelCase. It matches
// lowercase letters & numbers before the next capital as capturing group 1,
// and the first capital in the next word as capturing group 2. It matches
// "yC" in "MyCamel" and "y2ndC" in "My2ndCamel".
var lowerUpperRegexp = regexp.MustCompile("([a-z0-9]+)([A-Z])")

// upperLowerRegexp grabs word boundaries of the form "DD[B][In]dex". It
// matches the last uppercase letter of an acronym as capturing group 1, and
// the next uppercase + lowercase combo (indicating a new word) as capturing
// group 2. It matches "BTa" in "MyDDBTableWithLSI" and "2Wi" in "MyDDB2WithLSI".
var upperLowerRegexp = regexp.MustCompile("([A-Z0-9])([A-Z][a-z])")

// ToSnakeCaseFunc transforms a CamelCase input string s into an upper SNAKE_CASE string and returns it.
// For example, "usersDdbTableName" becomes "USERS_DDB_TABLE_NAME".
func ToSnakeCaseFunc(s string) string {
	snake := lowerUpperRegexp.ReplaceAllString(s, "${1}_${2}")
	snake = upperLowerRegexp.ReplaceAllString(snake, "${1}_${2}")
	return strings.ToUpper(snake)
}
// IncFunc increments an integer value and returns the result.
func IncFunc(i int) int {
	i++
	return i
}
// FmtSliceFunc renders a string representation of a go string slice, surrounded by brackets
// and joined by commas.
func FmtSliceFunc(elems []string) string {
	joined := strings.Join(elems, ", ")
	return "[" + joined + "]"
}
// QuoteSliceFunc places quotation marks around all elements of a go string slice.
// It returns nil for an empty or nil input.
func QuoteSliceFunc(elems []string) []string {
	if len(elems) == 0 {
		return nil
	}
	quoted := make([]string, 0, len(elems))
	for _, el := range elems {
		quoted = append(quoted, strconv.Quote(el))
	}
	return quoted
}
// generateMountPointJSON turns a list of MountPoint objects into a JSON string:
// `{"myEFSVolume": "/var/www", "myEBSVolume": "/usr/data"}`
// This function must be called on an array of correctly constructed MountPoint objects.
// Returns "{}" when there is nothing valid to serialize.
func generateMountPointJSON(mountPoints []*MountPoint) string {
	volumeMap := make(map[string]string) // Transform the mount points into a map
	for _, mp := range mountPoints {
		// Skip adding mount points with empty container paths to the map.
		// This is validated elsewhere so this condition should never happen, but it
		// will fail to inject mountpoints with empty paths.
		if aws.StringValue(mp.ContainerPath) == "" {
			continue
		}
		volumeMap[aws.StringValue(mp.SourceVolume)] = aws.StringValue(mp.ContainerPath)
	}
	out, ok := getJSONMap(volumeMap)
	if !ok {
		return "{}"
	}
	return string(out)
}

// generateSNSJSON turns a list of Topics objects into a JSON string:
// `{"myTopic": "topicArn", "mySecondTopic": "secondTopicArn"}`
// This function must be called on an array of correctly constructed Topic objects.
// A nil input returns "" (distinct from an empty list, which returns "{}").
func generateSNSJSON(topics []*Topic) string {
	if topics == nil {
		return ""
	}
	topicMap := make(map[string]string)
	for _, topic := range topics {
		// Topics with no name will not be included in the json
		if topic.Name == nil {
			continue
		}
		topicMap[aws.StringValue(topic.Name)] = topic.ARN()
	}
	out, ok := getJSONMap(topicMap)
	if !ok {
		return "{}"
	}
	return string(out)
}

// generateQueueURIJSON turns a list of Topic Subscription objects into a JSON string of their corresponding queues:
// `{"svcTopicEventsQueue": "${svctopicURL}"}`
// This function must be called on an array of correctly constructed Topic objects.
// A nil input returns "" (distinct from an empty list, which returns "{}").
func generateQueueURIJSON(ts []*TopicSubscription) string {
	if ts == nil {
		return ""
	}
	urlMap := make(map[string]string)
	for _, sub := range ts {
		// TopicSubscriptions with no name, service, or queue will not be included in the json
		if sub.Name == nil || sub.Service == nil || sub.Queue == nil {
			continue
		}
		svc := StripNonAlphaNumFunc(aws.StringValue(sub.Service))
		topicName := StripNonAlphaNumFunc(aws.StringValue(sub.Name))
		// Key uses a Title-cased topic name ("svcTopicEventsQueue"); the value
		// placeholder keeps the raw lowercase form ("${svctopicURL}") —
		// presumably to match the CFN substitution variable naming; confirm
		// against the queue template before changing either.
		subName := fmt.Sprintf("%s%sEventsQueue", svc, cases.Title(language.English).String(topicName))
		urlMap[subName] = fmt.Sprintf("${%s%sURL}", svc, topicName)
	}
	out, ok := getJSONMap(urlMap)
	if !ok {
		return "{}"
	}
	return string(out)
}
// getJSONMap serializes a string-to-string map to JSON. The boolean result is
// false when the map is empty/nil or when marshaling fails.
func getJSONMap(inMap map[string]string) ([]byte, bool) {
	// Empty and nil maps are treated as "nothing to serialize".
	if len(inMap) == 0 {
		return nil, false
	}
	serialized, err := json.Marshal(inMap)
	if err != nil {
		return nil, false
	}
	return serialized, true
}
| 208 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
)
// TestReplaceDashesFunc covers no-dash, single-dash and multi-dash inputs.
func TestReplaceDashesFunc(t *testing.T) {
	for _, tc := range []struct {
		name, in, wanted string
	}{
		{name: "no dashes", in: "mycooltable", wanted: "mycooltable"},
		{name: "has dash", in: "my-table", wanted: "myDASHtable"},
		{name: "has multiple dashes", in: "my--dog-table", wanted: "myDASHDASHdogDASHtable"},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, ReplaceDashesFunc(tc.in))
		})
	}
}
// TestDashReplacedLogicalIDToOriginal checks the inverse of ReplaceDashesFunc.
func TestDashReplacedLogicalIDToOriginal(t *testing.T) {
	for _, tc := range []struct {
		name, in, wanted string
	}{
		{name: "no dashes", in: "mycooltable", wanted: "mycooltable"},
		{name: "has dash", in: "myDASHtable", wanted: "my-table"},
		{name: "has multiple dashes", in: "myDASHDASHdogDASHtable", wanted: "my--dog-table"},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, DashReplacedLogicalIDToOriginal(tc.in))
		})
	}
}
// TestStripNonAlphaNumFunc covers already-clean names and names with
// DDB/S3-allowed special characters.
func TestStripNonAlphaNumFunc(t *testing.T) {
	for _, tc := range []struct {
		name, in, wanted string
	}{
		{name: "all alphanumeric", in: "MyCoolTable5", wanted: "MyCoolTable5"},
		{name: "ddb-allowed special characters", in: "My_Table-Name.5", wanted: "MyTableName5"},
		{name: "s3-allowed special characters", in: "my-bucket-5", wanted: "mybucket5"},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, StripNonAlphaNumFunc(tc.in))
		})
	}
}
// TestEnvVarNameFunc checks the sanitized name with the "Name" suffix.
func TestEnvVarNameFunc(t *testing.T) {
	for _, tc := range []struct {
		name, in, wanted string
	}{
		{name: "all alphanumeric", in: "MyCoolTable5", wanted: "MyCoolTable5Name"},
		{name: "ddb-allowed special characters", in: "My_Table-Name.5", wanted: "MyTableName5Name"},
		{name: "s3-allowed special characters", in: "my-bucket-5", wanted: "mybucket5Name"},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, EnvVarNameFunc(tc.in))
		})
	}
}
// TestToSnakeCaseFunc covers camelCase, PascalCase, acronyms, and digits.
func TestToSnakeCaseFunc(t *testing.T) {
	for _, tc := range []struct {
		name, in, wanted string
	}{
		{name: "camel case: starts with uppercase", in: "AdditionalResourcesPolicyArn", wanted: "ADDITIONAL_RESOURCES_POLICY_ARN"},
		{name: "camel case: starts with lowercase", in: "additionalResourcesPolicyArn", wanted: "ADDITIONAL_RESOURCES_POLICY_ARN"},
		{name: "all lower case", in: "myddbtable", wanted: "MYDDBTABLE"},
		{name: "has capitals in acronym", in: "myDDBTable", wanted: "MY_DDB_TABLE"},
		{name: "has capitals and numbers", in: "my2ndDDBTable", wanted: "MY2ND_DDB_TABLE"},
		{name: "has capitals at end", in: "myTableWithLSI", wanted: "MY_TABLE_WITH_LSI"},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, ToSnakeCaseFunc(tc.in))
		})
	}
}
// TestIncFunc checks increments around zero and at large magnitudes.
func TestIncFunc(t *testing.T) {
	for _, tc := range []struct {
		name       string
		in, wanted int
	}{
		{name: "negative", in: -1, wanted: 0},
		{name: "large negative", in: -32767, wanted: -32766},
		{name: "large positive", in: 4294967296, wanted: 4294967297},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, IncFunc(tc.in))
		})
	}
}
// TestFmtSliceFunc checks bracketed comma-joined rendering of string slices.
func TestFmtSliceFunc(t *testing.T) {
	for _, tc := range []struct {
		name   string
		in     []string
		wanted string
	}{
		{name: "simple case", in: []string{"my", "elements", "go", "here"}, wanted: "[my, elements, go, here]"},
		{name: "no elements", in: []string{}, wanted: "[]"},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, FmtSliceFunc(tc.in))
		})
	}
}
// TestQuoteSliceFunc checks quoting of each element; empty and nil inputs
// both produce a nil slice.
func TestQuoteSliceFunc(t *testing.T) {
	for _, tc := range []struct {
		name   string
		in     []string
		wanted []string
	}{
		{name: "simple case", in: []string{"my", "elements", "go", "here"}, wanted: []string{"\"my\"", "\"elements\"", "\"go\"", "\"here\""}},
		{name: "no elements", in: []string{}, wanted: []string(nil)},
		{name: "nil input", in: []string(nil), wanted: []string(nil)},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.wanted, QuoteSliceFunc(tc.in))
		})
	}
}
func TestGenerateMountPointJSON(t *testing.T) {
require.Equal(t, `{"myEFSVolume":"/var/www"}`, generateMountPointJSON([]*MountPoint{{ContainerPath: aws.String("/var/www"), SourceVolume: aws.String("myEFSVolume")}}), "JSON should render correctly")
require.Equal(t, "{}", generateMountPointJSON([]*MountPoint{}), "nil list of arguments should render ")
require.Equal(t, "{}", generateMountPointJSON([]*MountPoint{{SourceVolume: aws.String("fromEFS")}}), "empty paths should not get injected")
}
// TestGenerateSNSJSON checks the topic-name-to-ARN JSON rendering, including
// topics without names (skipped) and an empty topic list.
func TestGenerateSNSJSON(t *testing.T) {
	testCases := map[string]struct {
		in     []*Topic
		wanted string
	}{
		"JSON should render correctly": {
			in: []*Topic{
				{
					Name:      aws.String("tests"),
					AccountID: "123456789012",
					Region:    "us-west-2",
					Partition: "aws",
					App:       "appName",
					Env:       "envName",
					Svc:       "svcName",
				},
			},
			wanted: `{"tests":"arn:aws:sns:us-west-2:123456789012:appName-envName-svcName-tests"}`,
		},
		"Topics with no names show empty": {
			in: []*Topic{
				{
					AccountID: "123456789012",
					Region:    "us-west-2",
					Partition: "aws",
					App:       "appName",
					Env:       "envName",
					Svc:       "svcName",
				},
			},
			wanted: `{}`,
		},
		"nil list of arguments should render": {
			// Note: an empty (non-nil) slice renders "{}"; only a nil slice renders "".
			in:     []*Topic{},
			wanted: `{}`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, generateSNSJSON(tc.in))
		})
	}
}
// TestGenerateQueueURIJSON checks the subscription-queue JSON rendering:
// a complete subscription yields a "<svc><Topic>EventsQueue" key, while a
// subscription missing its topic name is skipped.
func TestGenerateQueueURIJSON(t *testing.T) {
	testCases := map[string]struct {
		in              []*TopicSubscription
		wanted          string
		wantedSubstring string
	}{
		"JSON should render correctly": {
			in: []*TopicSubscription{
				{
					Name:    aws.String("tests"),
					Service: aws.String("bestsvc"),
					Queue: &SQSQueue{
						Delay: aws.Int64(5),
					},
				},
			},
			wantedSubstring: `"bestsvcTestsEventsQueue":"${bestsvctestsURL}"`,
		},
		"Topics with no names show empty but main queue still populates": {
			in: []*TopicSubscription{
				{
					Service: aws.String("bestSvc"),
				},
			},
			wanted: `{}`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			if tc.wanted != "" {
				// require.Equal takes (expected, actual); the original call
				// had the arguments swapped, which mislabels failure output.
				require.Equal(t, tc.wanted, generateQueueURIJSON(tc.in))
			} else {
				require.Contains(t, generateQueueURIJSON(tc.in), tc.wantedSubstring)
			}
		})
	}
}
| 317 |
copilot-cli | aws | Go | //go:build integration
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template_test
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// TestTemplate_ParseScheduledJob renders the scheduled-job CloudFormation
// template under several manifest options and validates each rendered
// document against the real CloudFormation ValidateTemplate API.
func TestTemplate_ParseScheduledJob(t *testing.T) {
	customResources := map[string]template.S3ObjectLocation{
		"EnvControllerFunction": {
			Bucket: "my-bucket",
			Key:    "key",
		},
	}
	// Every case places the job in public subnets with a public IP; build the
	// shared value once instead of repeating the literal in each case.
	publicNetwork := template.NetworkOpts{
		AssignPublicIP: template.EnablePublicIP,
		SubnetsType:    template.PublicSubnetsPlacement,
	}
	testCases := map[string]struct {
		opts template.WorkloadOpts
	}{
		"renders a valid template by default": {
			opts: template.WorkloadOpts{
				ServiceDiscoveryEndpoint: "test.app.local",
				Network:                  publicNetwork,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders with timeout and no retries": {
			opts: template.WorkloadOpts{
				StateMachine: &template.StateMachineOpts{
					Timeout: aws.Int(3600),
				},
				Network:                  publicNetwork,
				ServiceDiscoveryEndpoint: "test.app.local",
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders with options": {
			opts: template.WorkloadOpts{
				StateMachine: &template.StateMachineOpts{
					Retries: aws.Int(5),
					Timeout: aws.Int(3600),
				},
				Network:                  publicNetwork,
				ServiceDiscoveryEndpoint: "test.app.local",
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders with options and addons": {
			opts: template.WorkloadOpts{
				StateMachine: &template.StateMachineOpts{
					Retries: aws.Int(3),
				},
				NestedStack: &template.WorkloadNestedStackOpts{
					StackName:       "AddonsStack",
					VariableOutputs: []string{"TableName"},
					SecretOutputs:   []string{"TablePassword"},
					PolicyOutputs:   []string{"TablePolicy"},
				},
				Network:                  publicNetwork,
				ServiceDiscoveryEndpoint: "test.app.local",
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders with Windows platform": {
			opts: template.WorkloadOpts{
				Network: publicNetwork,
				Platform: template.RuntimePlatformOpts{
					OS:   "windows",
					Arch: "x86_64",
				},
				ServiceDiscoveryEndpoint: "test.app.local",
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			sess, err := sessions.ImmutableProvider().Default()
			require.NoError(t, err)
			cfn := cloudformation.New(sess)
			tpl := template.New()

			// WHEN
			content, err := tpl.ParseScheduledJob(tc.opts)
			require.NoError(t, err)

			// THEN
			_, err = cfn.ValidateTemplate(&cloudformation.ValidateTemplateInput{
				TemplateBody: aws.String(content.String()),
			})
			require.NoError(t, err, content.String())
		})
	}
}
// TestTemplate_ParseLoadBalancedWebService renders the Load Balanced Web
// Service CloudFormation template under a variety of manifest options and
// validates each rendered document against the real CloudFormation
// ValidateTemplate API.
func TestTemplate_ParseLoadBalancedWebService(t *testing.T) {
	defaultHttpHealthCheck := template.HTTPHealthCheckOpts{
		HealthCheckPath: "/",
	}
	fakeS3Object := template.S3ObjectLocation{
		Bucket: "my-bucket",
		Key:    "key",
	}
	customResources := map[string]template.S3ObjectLocation{
		"DynamicDesiredCountFunction": fakeS3Object,
		"EnvControllerFunction":       fakeS3Object,
		"RulePriorityFunction":        fakeS3Object,
		"NLBCustomDomainFunction":     fakeS3Object,
		"NLBCertValidatorFunction":    fakeS3Object,
	}
	// Every case routes traffic to the same container; build the shared
	// listener once instead of repeating the literal in each case.
	newALBListener := func() *template.ALBListener {
		return &template.ALBListener{
			Rules: []template.ALBListenerRule{
				{
					Path:            "/",
					TargetPort:      "8080",
					TargetContainer: "main",
					HTTPVersion:     "GRPC",
					HTTPHealthCheck: defaultHttpHealthCheck,
					Stickiness:      "false",
				},
			},
		}
	}
	// Most cases place the service in public subnets with a public IP.
	publicNetwork := template.NetworkOpts{
		AssignPublicIP: template.EnablePublicIP,
		SubnetsType:    template.PublicSubnetsPlacement,
	}
	testCases := map[string]struct {
		opts template.WorkloadOpts
	}{
		"renders a valid template by default": {
			opts: template.WorkloadOpts{
				ALBListener:              newALBListener(),
				ServiceDiscoveryEndpoint: "test.app.local",
				Network:                  publicNetwork,
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders a valid grpc template by default": {
			opts: template.WorkloadOpts{
				ALBListener:              newALBListener(),
				ServiceDiscoveryEndpoint: "test.app.local",
				Network:                  publicNetwork,
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders a valid template with addons with no outputs": {
			opts: template.WorkloadOpts{
				ALBListener: newALBListener(),
				NestedStack: &template.WorkloadNestedStackOpts{
					StackName: "AddonsStack",
				},
				Network:                  publicNetwork,
				ServiceDiscoveryEndpoint: "test.app.local",
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders a valid template with addons with outputs": {
			opts: template.WorkloadOpts{
				ALBListener: newALBListener(),
				NestedStack: &template.WorkloadNestedStackOpts{
					StackName:       "AddonsStack",
					VariableOutputs: []string{"TableName"},
					SecretOutputs:   []string{"TablePassword"},
					PolicyOutputs:   []string{"TablePolicy"},
				},
				Network:                  publicNetwork,
				ServiceDiscoveryEndpoint: "test.app.local",
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders a valid template with private subnet placement": {
			opts: template.WorkloadOpts{
				ALBListener: newALBListener(),
				Network: template.NetworkOpts{
					AssignPublicIP: template.DisablePublicIP,
					SubnetsType:    template.PrivateSubnetsPlacement,
				},
				ServiceDiscoveryEndpoint: "test.app.local",
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders a valid template with all storage options": {
			opts: template.WorkloadOpts{
				ALBListener:              newALBListener(),
				ServiceDiscoveryEndpoint: "test.app.local",
				Network:                  publicNetwork,
				Storage: &template.StorageOpts{
					Ephemeral: aws.Int(500),
					EFSPerms: []*template.EFSPermission{
						{
							AccessPointID: aws.String("ap-1234"),
							FilesystemID:  aws.String("fs-5678"),
							Write:         true,
						},
					},
					MountPoints: []*template.MountPoint{
						{
							SourceVolume:  aws.String("efs"),
							ContainerPath: aws.String("/var/www"),
							ReadOnly:      aws.Bool(false),
						},
					},
					Volumes: []*template.Volume{
						{
							EFS: &template.EFSVolumeConfiguration{
								AccessPointID: aws.String("ap-1234"),
								Filesystem:    aws.String("fs-5678"),
								IAM:           aws.String("ENABLED"),
								RootDirectory: aws.String("/"),
							},
							Name: aws.String("efs"),
						},
					},
				},
				ALBEnabled:      true,
				CustomResources: customResources,
				EnvVersion:      "v1.42.0",
			},
		},
		"renders a valid template with minimal storage options": {
			opts: template.WorkloadOpts{
				ALBListener:              newALBListener(),
				ServiceDiscoveryEndpoint: "test.app.local",
				Network:                  publicNetwork,
				Storage: &template.StorageOpts{
					EFSPerms: []*template.EFSPermission{
						{
							FilesystemID: aws.String("fs-5678"),
						},
					},
					MountPoints: []*template.MountPoint{
						{
							SourceVolume:  aws.String("efs"),
							ContainerPath: aws.String("/var/www"),
							ReadOnly:      aws.Bool(true),
						},
					},
					Volumes: []*template.Volume{
						{
							Name: aws.String("efs"),
							EFS: &template.EFSVolumeConfiguration{
								Filesystem:    aws.String("fs-5678"),
								RootDirectory: aws.String("/"),
							},
						},
					},
				},
				ALBEnabled:      true,
				CustomResources: customResources,
				EnvVersion:      "v1.42.0",
			},
		},
		"renders a valid template with ephemeral storage": {
			opts: template.WorkloadOpts{
				ALBListener:              newALBListener(),
				Network:                  publicNetwork,
				ServiceDiscoveryEndpoint: "test.app.local",
				Storage: &template.StorageOpts{
					Ephemeral: aws.Int(500),
				},
				ALBEnabled:      true,
				CustomResources: customResources,
				EnvVersion:      "v1.42.0",
			},
		},
		"renders a valid template with entrypoint and command overrides": {
			opts: template.WorkloadOpts{
				ALBListener:              newALBListener(),
				EntryPoint:               []string{"/bin/echo", "hello"},
				Command:                  []string{"world"},
				ServiceDiscoveryEndpoint: "test.app.local",
				Network:                  publicNetwork,
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
		"renders a valid template with additional addons parameters": {
			opts: template.WorkloadOpts{
				ServiceDiscoveryEndpoint: "test.app.local",
				ALBListener:              newALBListener(),
				Network:                  publicNetwork,
				AddonsExtraParams: `ServiceName: !Ref Service
DiscoveryServiceArn:
  Fn::GetAtt: [DiscoveryService, Arn]
`,
				ALBEnabled:      true,
				CustomResources: customResources,
				EnvVersion:      "v1.42.0",
			},
		},
		"renders a valid template with Windows platform": {
			opts: template.WorkloadOpts{
				ALBListener: newALBListener(),
				Network:     publicNetwork,
				Platform: template.RuntimePlatformOpts{
					OS:   "windows",
					Arch: "x86_64",
				},
				ServiceDiscoveryEndpoint: "test.app.local",
				ALBEnabled:               true,
				CustomResources:          customResources,
				EnvVersion:               "v1.42.0",
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			sess, err := sessions.ImmutableProvider().Default()
			require.NoError(t, err)
			cfn := cloudformation.New(sess)
			tpl := template.New()

			// WHEN
			content, err := tpl.ParseLoadBalancedWebService(tc.opts)
			require.NoError(t, err)

			// THEN
			_, err = cfn.ValidateTemplate(&cloudformation.ValidateTemplateInput{
				TemplateBody: aws.String(content.String()),
			})
			require.NoError(t, err, content.String())
		})
	}
}
// TestTemplate_ParseNetwork renders the load-balanced web service template with
// different NetworkOpts and compares the Service resource's NetworkConfiguration
// against expected YAML. Both sides are unmarshaled before comparison, so the
// check is structural and insensitive to formatting.
func TestTemplate_ParseNetwork(t *testing.T) {
	// cfn mirrors just enough of the rendered CloudFormation template to pull
	// out Resources.Service.Properties.NetworkConfiguration.
	type cfn struct {
		Resources struct {
			Service struct {
				Properties struct {
					NetworkConfiguration map[interface{}]interface{} `yaml:"NetworkConfiguration"`
				} `yaml:"Properties"`
			} `yaml:"Service"`
		} `yaml:"Resources"`
	}
	testCases := map[string]struct {
		input               template.NetworkOpts
		wantedNetworkConfig string
	}{
		"should render AWS VPC configuration for private subnets": {
			input: template.NetworkOpts{
				AssignPublicIP: "DISABLED",
				SubnetsType:    "PrivateSubnets",
			},
			wantedNetworkConfig: `
  AwsvpcConfiguration:
    AssignPublicIp: DISABLED
    Subnets:
      Fn::Split:
        - ','
        - Fn::ImportValue: !Sub '${AppName}-${EnvName}-PrivateSubnets'
    SecurityGroups:
      - Fn::ImportValue: !Sub '${AppName}-${EnvName}-EnvironmentSecurityGroup'
`,
		},
		"should render AWS VPC configuration for private subnets with security groups": {
			input: template.NetworkOpts{
				AssignPublicIP: "DISABLED",
				SubnetsType:    "PrivateSubnets",
				SecurityGroups: []template.SecurityGroup{
					template.PlainSecurityGroup("sg-1bcf1d5b"),
					template.PlainSecurityGroup("sg-asdasdas"),
					template.ImportedSecurityGroup("mydb-sg001"),
				},
			},
			// The default environment security group comes first, then plain
			// IDs, then imported security groups.
			wantedNetworkConfig: `
  AwsvpcConfiguration:
    AssignPublicIp: DISABLED
    Subnets:
      Fn::Split:
        - ','
        - Fn::ImportValue: !Sub '${AppName}-${EnvName}-PrivateSubnets'
    SecurityGroups:
      - Fn::ImportValue: !Sub '${AppName}-${EnvName}-EnvironmentSecurityGroup'
      - "sg-1bcf1d5b"
      - "sg-asdasdas"
      - Fn::ImportValue: mydb-sg001
`,
		},
		"should render AWS VPC configuration without default environment security group": {
			input: template.NetworkOpts{
				AssignPublicIP: "DISABLED",
				SubnetsType:    "PrivateSubnets",
				SecurityGroups: []template.SecurityGroup{
					template.PlainSecurityGroup("sg-1bcf1d5b"),
					template.PlainSecurityGroup("sg-asdasdas"),
				},
				// DenyDefaultSecurityGroup drops the environment security group.
				DenyDefaultSecurityGroup: true,
			},
			wantedNetworkConfig: `
  AwsvpcConfiguration:
    AssignPublicIp: DISABLED
    Subnets:
      Fn::Split:
        - ','
        - Fn::ImportValue: !Sub '${AppName}-${EnvName}-PrivateSubnets'
    SecurityGroups:
      - "sg-1bcf1d5b"
      - "sg-asdasdas"
`,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			tpl := template.New()
			wanted := make(map[interface{}]interface{})
			err := yaml.Unmarshal([]byte(tc.wantedNetworkConfig), &wanted)
			require.NoError(t, err, "unmarshal wanted config")
			// WHEN
			content, err := tpl.ParseLoadBalancedWebService(template.WorkloadOpts{
				Network: tc.input,
			})
			// THEN
			require.NoError(t, err, "parse load balanced web service")
			var actual cfn
			err = yaml.Unmarshal(content.Bytes(), &actual)
			require.NoError(t, err, "unmarshal actual config")
			require.Equal(t, wanted, actual.Resources.Service.Properties.NetworkConfiguration)
		})
	}
}
| 608 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"errors"
"fmt"
"io/fs"
"os"
"testing"
"github.com/spf13/afero"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/stretchr/testify/require"
)
// mockFS implements the fs.ReadFileFS interface on top of an afero filesystem
// so tests can exercise Template against an in-memory file tree.
type mockFS struct {
	afero.Fs
}

// ReadFile reads the named file from the underlying afero filesystem.
func (m *mockFS) ReadFile(name string) ([]byte, error) {
	return afero.ReadFile(m.Fs, name)
}
// ReadDir lists the entries of the named directory, adapting afero's
// os.FileInfo results into fs.DirEntry values via mockDirEntry.
func (m *mockFS) ReadDir(name string) ([]fs.DirEntry, error) {
	infos, err := afero.ReadDir(m.Fs, name)
	if err != nil {
		return nil, err
	}
	entries := make([]fs.DirEntry, 0, len(infos))
	for _, info := range infos {
		entries = append(entries, &mockDirEntry{FileInfo: info})
	}
	return entries, nil
}
// Open opens the named file on the underlying afero filesystem.
func (m *mockFS) Open(name string) (fs.File, error) {
	return m.Fs.Open(name)
}

// mockDirEntry adapts an os.FileInfo into an fs.DirEntry for ReadDir.
type mockDirEntry struct {
	os.FileInfo
}

// Type returns the file's mode bits, satisfying fs.DirEntry.
func (m *mockDirEntry) Type() fs.FileMode {
	return m.Mode()
}

// Info returns the wrapped FileInfo, satisfying fs.DirEntry.
func (m *mockDirEntry) Info() (fs.FileInfo, error) {
	return m.FileInfo, nil
}
// TestTemplate_Read verifies that Read resolves paths under "templates/" and
// that missing files surface an error naming the requested path.
func TestTemplate_Read(t *testing.T) {
	testCases := map[string]struct {
		inPath        string
		fs            func() afero.Fs
		wantedContent string
		wantedErr     error
	}{
		"template does not exist": {
			inPath: "/fake/manifest.yml",
			fs: func() afero.Fs {
				return afero.NewMemMapFs()
			},
			wantedErr: errors.New("read template /fake/manifest.yml"),
		},
		"returns content": {
			inPath: "/fake/manifest.yml",
			fs: func() afero.Fs {
				memFS := afero.NewMemMapFs()
				_ = memFS.MkdirAll("templates/fake/", 0755)
				_ = afero.WriteFile(memFS, "templates/fake/manifest.yml", []byte("hello"), 0644)
				return memFS
			},
			wantedContent: "hello",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			tpl := &Template{fs: &mockFS{Fs: tc.fs()}}

			// WHEN
			content, err := tpl.Read(tc.inPath)

			// THEN
			if tc.wantedErr != nil {
				require.Contains(t, err.Error(), tc.wantedErr.Error())
				return
			}
			require.Equal(t, tc.wantedContent, content.String())
		})
	}
}
// TestTemplate_UploadEnvironmentCustomResources checks that every environment
// custom-resource script is compressed and uploaded under a content-addressed
// key, and that a missing script file surfaces a descriptive error.
func TestTemplate_UploadEnvironmentCustomResources(t *testing.T) {
	testCases := map[string]struct {
		fs        func() afero.Fs
		wantedErr error
	}{
		"success": {
			fs: func() afero.Fs {
				fs := afero.NewMemMapFs()
				_ = fs.MkdirAll("templates/custom-resources/", 0755)
				for _, file := range envCustomResourceFiles {
					_ = afero.WriteFile(fs, fmt.Sprintf("templates/custom-resources/%s.js", file), []byte("hello"), 0644)
				}
				return fs
			},
		},
		"errors if env custom resource file doesn't exist": {
			fs: func() afero.Fs {
				return afero.NewMemMapFs()
			},
			wantedErr: fmt.Errorf("read template custom-resources/dns-cert-validator.js: open templates/custom-resources/dns-cert-validator.js: file does not exist"),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			tpl := &Template{
				fs: &mockFS{tc.fs()},
			}
			mockUploader := s3.CompressAndUploadFunc(func(key string, files ...s3.NamedBinary) (string, error) {
				require.Contains(t, key, "scripts")
				// The key embeds the SHA-256 of the file contents; this is sha256("hello").
				require.Contains(t, key, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824")
				return "mockURL", nil
			})
			// WHEN
			gotCustomResources, err := tpl.UploadEnvironmentCustomResources(mockUploader)
			if tc.wantedErr != nil {
				require.EqualError(t, err, tc.wantedErr.Error())
			} else {
				require.NoError(t, err)
				// One uploaded artifact per custom-resource script.
				require.Equal(t, len(envCustomResourceFiles), len(gotCustomResources))
			}
		})
	}
}
// TestTemplate_Parse covers the three failure modes of Parse (missing file,
// malformed template syntax, execution failure on missing data fields) plus
// the happy path where template data is interpolated into the output.
func TestTemplate_Parse(t *testing.T) {
	testCases := map[string]struct {
		inPath        string
		inData        interface{}
		fs            func() afero.Fs
		wantedContent string
		wantedErr     error
	}{
		"template does not exist": {
			inPath: "/fake/manifest.yml",
			fs: func() afero.Fs {
				return afero.NewMemMapFs()
			},
			wantedErr: errors.New("read template /fake/manifest.yml"),
		},
		"template cannot be parsed": {
			inPath: "/fake/manifest.yml",
			fs: func() afero.Fs {
				fs := afero.NewMemMapFs()
				_ = fs.MkdirAll("templates/fake", 0755)
				// "{{}}" is invalid text/template syntax.
				_ = afero.WriteFile(fs, "templates/fake/manifest.yml", []byte(`{{}}`), 0644)
				return fs
			},
			wantedErr: errors.New("parse template /fake/manifest.yml"),
		},
		"template cannot be executed": {
			inPath: "/fake/manifest.yml",
			// The empty struct has no Name field, so execution fails.
			inData: struct{}{},
			fs: func() afero.Fs {
				fs := afero.NewMemMapFs()
				_ = fs.MkdirAll("templates/fake", 0755)
				_ = afero.WriteFile(fs, "templates/fake/manifest.yml", []byte(`{{.Name}}`), 0644)
				return fs
			},
			wantedErr: fmt.Errorf("execute template %s", "/fake/manifest.yml"),
		},
		"valid template": {
			inPath: "/fake/manifest.yml",
			inData: struct {
				Name string
			}{
				Name: "webhook",
			},
			fs: func() afero.Fs {
				fs := afero.NewMemMapFs()
				_ = fs.MkdirAll("templates/fake", 0755)
				_ = afero.WriteFile(fs, "templates/fake/manifest.yml", []byte(`{{.Name}}`), 0644)
				return fs
			},
			wantedContent: "webhook",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			tpl := &Template{
				fs: &mockFS{Fs: tc.fs()},
			}
			// WHEN
			c, err := tpl.Parse(tc.inPath, tc.inData)
			if tc.wantedErr != nil {
				require.Contains(t, err.Error(), tc.wantedErr.Error())
			} else {
				require.Equal(t, tc.wantedContent, c.String())
			}
		})
	}
}
| 226 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"bytes"
"crypto/sha256"
"fmt"
"strconv"
"text/template"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/dustin/go-humanize/english"
"github.com/google/uuid"
"github.com/aws/aws-sdk-go/aws"
)
// Constants for template paths.
const (
	// Paths of workload cloudformation templates under templates/workloads/.
	fmtWkldCFTemplatePath         = "workloads/%s/%s/cf.yml"       // Filled with the workload directory (services/jobs) and template name.
	fmtWkldPartialsCFTemplatePath = "workloads/partials/cf/%s.yml" // Filled with the partial template's name.
	// Directories under templates/workloads/.
	servicesDirName = "services"
	jobDirName      = "jobs"
	// Names of workload templates.
	lbWebSvcTplName     = "lb-web"
	rdWebSvcTplName     = "rd-web"
	backendSvcTplName   = "backend"
	workerSvcTplName    = "worker"
	staticSiteTplName   = "static-site"
	scheduledJobTplName = "scheduled-job"
)
// Constants for workload options.
const (
	// AWS VPC networking configuration.
	EnablePublicIP          = "ENABLED"
	DisablePublicIP         = "DISABLED"
	PublicSubnetsPlacement  = "PublicSubnets"
	PrivateSubnetsPlacement = "PrivateSubnets"

	// RuntimePlatform configuration.
	OSLinux                 = "LINUX"
	OSWindowsServerFull     = OSWindowsServer2019Full // Alias 2019 as the default Windows Server Full platform.
	OSWindowsServerCore     = OSWindowsServer2019Core // Alias 2019 as the default Windows Server Core platform.
	OSWindowsServer2019Full = "WINDOWS_SERVER_2019_FULL"
	OSWindowsServer2019Core = "WINDOWS_SERVER_2019_CORE"
	OSWindowsServer2022Full = "WINDOWS_SERVER_2022_FULL"
	OSWindowsServer2022Core = "WINDOWS_SERVER_2022_CORE"

	ArchX86   = "X86_64"
	ArchARM64 = "ARM64"
)
// Constants for ARN options.
const (
	// Placeholders, in order: partition, region, account ID, then app-env-svc-topic.
	snsARNPattern = "arn:%s:sns:%s:%s:%s-%s-%s-%s"
)

// Constants for stack resource logical IDs
const (
	LogicalIDHTTPListenerRuleWithDomain = "HTTPListenerRuleWithDomain"
)

const (
	// NoExposedContainerPort indicates no port should be exposed for the service container.
	NoExposedContainerPort = "-1"
)
var (
	// Template names under "workloads/partials/cf/".
	// Each name is parsed alongside the base workload template (see parseWkld)
	// so the base template can reference the partial by name.
	partialsWorkloadCFTemplateNames = []string{
		"loggroup",
		"envvars-container",
		"envvars-common",
		"secrets",
		"executionrole",
		"taskrole",
		"workload-container",
		"fargate-taskdef-base-properties",
		"service-base-properties",
		"servicediscovery",
		"addons",
		"sidecars",
		"logconfig",
		"autoscaling",
		"eventrule",
		"state-machine",
		"state-machine-definition.json",
		"efs-access-point",
		"https-listener",
		"http-listener",
		"env-controller",
		"mount-points",
		"variables",
		"volumes",
		"image-overrides",
		"instancerole",
		"accessrole",
		"publish",
		"subscribe",
		"nlb",
		"vpc-connector",
		"alb",
		"rollback-alarms",
	}

	// Operating systems to determine Fargate platform versions.
	// These Windows families pin platform version 1.0.0 (see RuntimePlatformOpts.Version).
	osFamiliesForPV100 = []string{
		OSWindowsServer2019Full, OSWindowsServer2019Core, OSWindowsServer2022Full, OSWindowsServer2022Core,
	}
)
// WorkloadNestedStackOpts holds configuration that's needed if the workload stack has a nested stack.
type WorkloadNestedStackOpts struct {
	StackName string

	// Output names of the nested stack, grouped by how the workload consumes them.
	VariableOutputs      []string
	SecretOutputs        []string
	PolicyOutputs        []string
	SecurityGroupOutputs []string
}

// SidecarOpts holds configuration that's needed if the service has sidecar containers.
type SidecarOpts struct {
	Name         string
	Image        *string
	Essential    *bool
	CredsParam   *string // Credentials parameter for a private registry — TODO confirm against callers.
	Variables    map[string]Variable
	Secrets      map[string]Secret
	Storage      SidecarStorageOpts
	DockerLabels map[string]string
	DependsOn    map[string]string
	EntryPoint   []string
	Command      []string
	HealthCheck  *ContainerHealthCheck
	PortMappings []*PortMapping
}

// PortMapping holds container port mapping configuration.
type PortMapping struct {
	Protocol      string
	ContainerPort uint16
	ContainerName string
}

// SidecarStorageOpts holds data structures for rendering Mount Points inside of a sidecar.
type SidecarStorageOpts struct {
	MountPoints []*MountPoint
}

// StorageOpts holds data structures for rendering Volumes and Mount Points
type StorageOpts struct {
	Ephemeral         *int
	ReadonlyRootFS    *bool
	Volumes           []*Volume
	MountPoints       []*MountPoint
	EFSPerms          []*EFSPermission
	ManagedVolumeInfo *ManagedVolumeCreationInfo // Used for delegating CreationInfo for Copilot-managed EFS.
}

// requiresEFSCreation returns true if managed volume information is specified; false otherwise.
func (s *StorageOpts) requiresEFSCreation() bool {
	return s.ManagedVolumeInfo != nil
}
// EFSPermission holds information needed to render an IAM policy statement.
type EFSPermission struct {
	FilesystemID  *string
	Write         bool // Whether the policy grants write access to the filesystem.
	AccessPointID *string
}

// MountPoint holds information needed to render a MountPoint in a container definition.
type MountPoint struct {
	ContainerPath *string
	ReadOnly      *bool
	SourceVolume  *string
}

// Volume contains fields that render a volume, its name, and EFSVolumeConfiguration
type Volume struct {
	Name *string

	EFS *EFSVolumeConfiguration
}

// ManagedVolumeCreationInfo holds information about how to create Copilot-managed access points.
type ManagedVolumeCreationInfo struct {
	Name    *string
	DirName *string
	UID     *uint32
	GID     *uint32
}

// EFSVolumeConfiguration contains information about how to specify externally managed file systems.
type EFSVolumeConfiguration struct {
	// EFSVolumeConfiguration
	Filesystem    *string
	RootDirectory *string // "/" or empty are equivalent.

	// Authorization Config
	AccessPointID *string
	IAM           *string // ENABLED or DISABLED
}

// LogConfigOpts holds configuration that's needed if the service is configured with Firelens to route
// its logs.
type LogConfigOpts struct {
	Image          *string
	Destination    map[string]string
	EnableMetadata *string
	SecretOptions  map[string]Secret
	ConfigFile     *string
	Variables      map[string]Variable
	Secrets        map[string]Secret
}
// HTTPTargetContainer represents the target group of a load balancer that points to a container.
type HTTPTargetContainer struct {
	Name string
	Port string // Container port; NoExposedContainerPort ("-1") or empty means no traffic is routed.
}

// Exposed returns true if the target container has an accessible port to receive traffic.
func (tg HTTPTargetContainer) Exposed() bool {
	return tg.Port != "" && tg.Port != NoExposedContainerPort
}
// StrconvUint16 returns the base-10 string representation of a uint16 value.
func StrconvUint16(val uint16) string {
	return strconv.Itoa(int(val))
}
// HTTPHealthCheckOpts holds configuration that's needed for HTTP Health Check.
type HTTPHealthCheckOpts struct {
	// Fields with defaults always set.
	HealthCheckPath string
	GracePeriod     int64

	// Optional.
	Port                string
	SuccessCodes        string
	HealthyThreshold    *int64
	UnhealthyThreshold  *int64
	Interval            *int64
	Timeout             *int64
	DeregistrationDelay *int64
}

// importable is implemented by values that may need to be resolved via a
// CloudFormation import instead of being rendered as a plain string.
type importable interface {
	RequiresImport() bool
}

// importableValue is an importable carrying a single string value, e.g. an
// environment variable value or a security group ID.
type importableValue interface {
	importable
	Value() string
}
// Variable represents the value of an environment variable.
type Variable importableValue

// ImportedVariable returns a Variable that should be imported from a stack.
func ImportedVariable(name string) Variable {
	return importedEnvVar(name)
}

// PlainVariable returns a Variable that is a plain string value.
func PlainVariable(value string) Variable {
	return plainEnvVar(value)
}

// plainEnvVar is a Variable rendered as a literal string.
type plainEnvVar string

// RequiresImport returns false for a plain string environment variable.
func (v plainEnvVar) RequiresImport() bool {
	return false
}

// Value returns the plain string value of the environment variable.
func (v plainEnvVar) Value() string {
	return string(v)
}

// importedEnvVar is a Variable resolved from another stack's export name.
type importedEnvVar string

// RequiresImport returns true for an imported environment variable.
func (v importedEnvVar) RequiresImport() bool {
	return true
}

// Value returns the name of the import that will be the value of the environment variable.
func (v importedEnvVar) Value() string {
	return string(v)
}
// importableSubValueFrom describes how a secret's ValueFrom should be rendered:
// via !Sub, via an import, or as a plain string.
type importableSubValueFrom interface {
	importable
	RequiresSub() bool
	ValueFrom() string
}

// A Secret represents an SSM or SecretsManager secret that can be rendered in CloudFormation.
type Secret importableSubValueFrom

// plainSSMOrSecretARN is a Secret that can be referred to by an SSM Parameter Name or a secret ARN.
type plainSSMOrSecretARN struct {
	value string
}

// RequiresSub returns true if the secret should be populated in CloudFormation with !Sub.
func (s plainSSMOrSecretARN) RequiresSub() bool {
	return false
}

// RequiresImport returns true if the secret should be imported from other CloudFormation stack.
func (s plainSSMOrSecretARN) RequiresImport() bool {
	return false
}

// ValueFrom returns the plain string value of the secret.
func (s plainSSMOrSecretARN) ValueFrom() string {
	return s.value
}

// SecretFromPlainSSMOrARN returns a Secret that refers to an SSM parameter or a secret ARN.
func SecretFromPlainSSMOrARN(value string) plainSSMOrSecretARN {
	return plainSSMOrSecretARN{
		value: value,
	}
}

// importedSSMorSecretARN is a Secret that can be referred by the name of the import value from env addon or an arbitrary CloudFormation stack.
type importedSSMorSecretARN struct {
	value string
}

// RequiresSub returns true if the secret should be populated in CloudFormation with !Sub.
func (s importedSSMorSecretARN) RequiresSub() bool {
	return false
}

// RequiresImport returns true if the secret should be imported from env addon or an arbitrary CloudFormation stack.
func (s importedSSMorSecretARN) RequiresImport() bool {
	return true
}

// ValueFrom returns the name of the import value of the Secret.
func (s importedSSMorSecretARN) ValueFrom() string {
	return s.value
}

// SecretFromImportedSSMOrARN returns a Secret that refers to imported name of SSM parameter or a secret ARN.
func SecretFromImportedSSMOrARN(value string) importedSSMorSecretARN {
	return importedSSMorSecretARN{
		value: value,
	}
}

// secretsManagerName is a Secret that can be referred by a SecretsManager secret name.
type secretsManagerName struct {
	value string
}

// RequiresSub returns true if the secret should be populated in CloudFormation with !Sub.
func (s secretsManagerName) RequiresSub() bool {
	return true
}

// RequiresImport returns true if the secret should be imported from other CloudFormation stack.
func (s secretsManagerName) RequiresImport() bool {
	return false
}

// ValueFrom returns the resource ID of the SecretsManager secret for populating the ARN.
func (s secretsManagerName) ValueFrom() string {
	return fmt.Sprintf("secret:%s", s.value)
}

// Service returns the name of the SecretsManager service for populating the ARN.
func (s secretsManagerName) Service() string {
	return secretsmanager.ServiceName
}

// SecretFromSecretsManager returns a Secret that refers to SecretsManager secret name.
func SecretFromSecretsManager(value string) secretsManagerName {
	return secretsManagerName{
		value: value,
	}
}
// NetworkLoadBalancerListener holds configuration that's needed for a Network Load Balancer listener.
type NetworkLoadBalancerListener struct {
	// The port and protocol that the Network Load Balancer listens to.
	Port     string
	Protocol string

	// The target container and port to which the traffic is routed to from the Network Load Balancer.
	TargetContainer string
	TargetPort      string

	SSLPolicy *string // The SSL policy applied when using TLS protocol.

	Stickiness          *bool
	HealthCheck         NLBHealthCheck
	DeregistrationDelay *int64
}

// NLBHealthCheck holds configuration for Network Load Balancer health check.
type NLBHealthCheck struct {
	Port               string // The port to which health check requests made from Network Load Balancer are routed to.
	HealthyThreshold   *int64
	UnhealthyThreshold *int64
	Timeout            *int64
	Interval           *int64
	GracePeriod        *int64
}

// NetworkLoadBalancer holds configuration that's needed for a Network Load Balancer.
type NetworkLoadBalancer struct {
	PublicSubnetCIDRs   []string
	Listener            []NetworkLoadBalancerListener
	MainContainerPort   string
	CertificateRequired bool // True when any listener uses TLS — TODO confirm against callers.
	Aliases             []string
}
// ALBListenerRule holds configuration that's needed for an Application Load Balancer listener rule.
type ALBListenerRule struct {
	// The path that the Application Load Balancer listens to.
	Path string

	// The target container and port to which the traffic is routed to from the Application Load Balancer.
	TargetContainer string
	TargetPort      string

	Aliases          []string
	AllowedSourceIps []string
	Stickiness       string
	HTTPHealthCheck  HTTPHealthCheckOpts
	HTTPVersion      string
	RedirectToHTTPS  bool // Only relevant if HTTPSListener is true.

	DeregistrationDelay *int64
}

// ALBListener holds configuration that's needed for an Application Load Balancer Listener.
type ALBListener struct {
	Rules             []ALBListenerRule
	HostedZoneAliases AliasesForHostedZone
	IsHTTPS           bool // True if the listener listening on port 443.
	MainContainerPort string
}
// Aliases returns all the unique aliases specified across the ALB's routing
// rules, preserving first-seen order.
func (cfg *ALBListener) Aliases() []string {
	var uniqueAliases []string
	visited := make(map[string]struct{})
	for _, rule := range cfg.Rules {
		for _, alias := range rule.Aliases {
			if _, ok := visited[alias]; ok {
				continue
			}
			visited[alias] = struct{}{}
			uniqueAliases = append(uniqueAliases, alias)
		}
	}
	return uniqueAliases
}

// RulePaths returns the routing path of every listener rule, in rule order.
func (cfg *ALBListener) RulePaths() []string {
	var paths []string
	for _, rule := range cfg.Rules {
		paths = append(paths, rule.Path)
	}
	return paths
}
// ServiceConnect holds configuration for ECS Service Connect.
type ServiceConnect struct {
	Alias *string
}

// AdvancedCount holds configuration for autoscaling and capacity provider
// parameters.
type AdvancedCount struct {
	Spot        *int
	Autoscaling *AutoscalingOpts
	Cps         []*CapacityProviderStrategy
}

// ContainerHealthCheck holds configuration for container health check.
type ContainerHealthCheck struct {
	Command     []string
	Interval    *int64
	Retries     *int64
	StartPeriod *int64
	Timeout     *int64
}

// CapacityProviderStrategy holds the configuration needed for a
// CapacityProviderStrategyItem on a Service
type CapacityProviderStrategy struct {
	Base             *int
	Weight           *int
	CapacityProvider string
}

// Cooldown holds configuration needed for autoscaling cooldown fields.
type Cooldown struct {
	ScaleInCooldown  *float64
	ScaleOutCooldown *float64
}

// AutoscalingOpts holds configuration that's needed for Auto Scaling.
type AutoscalingOpts struct {
	MinCapacity  *int
	MaxCapacity  *int
	CPU          *float64 // Target CPU utilization.
	Memory       *float64 // Target memory utilization.
	Requests     *float64
	ResponseTime *float64

	// Per-metric scale-in/scale-out cooldowns.
	CPUCooldown        Cooldown
	MemCooldown        Cooldown
	ReqCooldown        Cooldown
	RespTimeCooldown   Cooldown
	QueueDelayCooldown Cooldown

	QueueDelay *AutoscalingQueueDelayOpts
}

// AliasesForHostedZone maps hosted zone IDs to aliases that belong to it.
type AliasesForHostedZone map[string][]string

// AutoscalingQueueDelayOpts holds configuration to scale SQS queues.
type AutoscalingQueueDelayOpts struct {
	AcceptableBacklogPerTask int
}

// ObservabilityOpts holds configurations for observability.
type ObservabilityOpts struct {
	Tracing string // The name of the vendor used for tracing.
}
// DeploymentConfigurationOpts holds configuration for rolling deployments.
type DeploymentConfigurationOpts struct {
	// The lower limit on the number of tasks that should be running during a service deployment or when a container instance is draining.
	MinHealthyPercent int
	// The upper limit on the number of tasks that should be running during a service deployment or when a container instance is draining.
	MaxPercent int
	Rollback   RollingUpdateRollbackConfig
}

// RollingUpdateRollbackConfig holds config for rollback alarms.
type RollingUpdateRollbackConfig struct {
	AlarmNames []string // Names of existing alarms.

	// Custom alarms to create.
	CPUUtilization    *float64
	MemoryUtilization *float64
	MessagesDelayed   *int
}

// HasRollbackAlarms returns true if the client is using ABR.
func (cfg RollingUpdateRollbackConfig) HasRollbackAlarms() bool {
	return len(cfg.AlarmNames) > 0 || cfg.HasCustomAlarms()
}

// HasCustomAlarms returns true if the client is using Copilot-generated alarms for alarm-based rollbacks.
func (cfg RollingUpdateRollbackConfig) HasCustomAlarms() bool {
	return cfg.CPUUtilization != nil || cfg.MemoryUtilization != nil || cfg.MessagesDelayed != nil
}

// TruncateAlarmName joins app, env, svc, and alarmType with hyphens into a
// CloudWatch alarm name that never exceeds the 255-character limit. When the
// joined name would be too long, app, env, and svc are each truncated to an
// equal share of the remaining budget while alarmType is kept intact.
func (cfg RollingUpdateRollbackConfig) TruncateAlarmName(app, env, svc, alarmType string) string {
	const (
		maxNameLen    = 255 // CloudWatch AlarmName limit.
		numSeparators = 3   // The three "-" characters joining the four segments.
	)
	// Previously the separators were not counted here, so names of 256-258
	// characters could slip through; include them in the budget.
	if len(app)+len(env)+len(svc)+len(alarmType)+numSeparators <= maxNameLen {
		return fmt.Sprintf("%s-%s-%s-%s", app, env, svc, alarmType)
	}
	// Split the remaining budget evenly across the three variable segments.
	maxSubstringLength := (maxNameLen - len(alarmType) - numSeparators) / 3
	if maxSubstringLength < 0 {
		maxSubstringLength = 0 // Guard against a pathologically long alarmType.
	}
	// Truncate each segment only if it exceeds its share; slicing a shorter
	// string would panic.
	truncate := func(s string) string {
		if len(s) > maxSubstringLength {
			return s[:maxSubstringLength]
		}
		return s
	}
	return fmt.Sprintf("%s-%s-%s-%s", truncate(app), truncate(env), truncate(svc), alarmType)
}
// ExecuteCommandOpts holds configuration that's needed for ECS Execute Command.
type ExecuteCommandOpts struct{}

// StateMachineOpts holds configuration needed for State Machine retries and timeout.
type StateMachineOpts struct {
	Timeout *int
	Retries *int
}

// PublishOpts holds configuration needed if the service has publishers.
type PublishOpts struct {
	Topics []*Topic
}

// Topic holds information needed to render a SNSTopic in a container definition.
type Topic struct {
	Name            *string
	FIFOTopicConfig *FIFOTopicConfig

	// Fields used to construct the topic's ARN (see snsARNPattern).
	Region    string
	Partition string
	AccountID string
	App       string
	Env       string
	Svc       string
}

// FIFOTopicConfig holds configuration needed if the topic is FIFO.
type FIFOTopicConfig struct {
	ContentBasedDeduplication *bool
}

// SubscribeOpts holds configuration needed if the service has subscriptions.
type SubscribeOpts struct {
	Topics []*TopicSubscription
	Queue  *SQSQueue // The shared events queue for all subscriptions.
}

// HasTopicQueues returns true if any individual subscription has a dedicated queue.
func (s *SubscribeOpts) HasTopicQueues() bool {
	for _, t := range s.Topics {
		if t.Queue != nil {
			return true
		}
	}
	return false
}

// TopicSubscription holds information needed to render a SNS Topic Subscription in a container definition.
type TopicSubscription struct {
	Name         *string
	Service      *string
	FilterPolicy *string
	Queue        *SQSQueue // If set, this subscription routes to its own queue instead of the shared one.
}

// SQSQueue holds information needed to render a SQS Queue in a container definition.
type SQSQueue struct {
	Retention       *int64
	Delay           *int64
	Timeout         *int64
	DeadLetter      *DeadLetterQueue
	FIFOQueueConfig *FIFOQueueConfig
}

// FIFOQueueConfig holds information needed to render a FIFO SQS Queue in a container definition.
type FIFOQueueConfig struct {
	FIFOThroughputLimit       *string
	ContentBasedDeduplication *bool
	DeduplicationScope        *string
}

// DeadLetterQueue holds information needed to render a dead-letter SQS Queue in a container definition.
type DeadLetterQueue struct {
	Tries *uint16 // Number of receive attempts before a message is moved to the dead-letter queue.
}
// NetworkOpts holds AWS networking configuration for the workloads.
type NetworkOpts struct {
	SecurityGroups []SecurityGroup
	AssignPublicIP string // "ENABLED" or "DISABLED" (see EnablePublicIP/DisablePublicIP).

	// SubnetsType and SubnetIDs are mutually exclusive. They won't be set together.
	SubnetsType string
	SubnetIDs   []string

	// When true, the default environment security group is not attached to the tasks.
	DenyDefaultSecurityGroup bool
}

// SecurityGroup represents the ID of an additional security group associated with the tasks.
type SecurityGroup importableValue

// PlainSecurityGroup returns a SecurityGroup that is a plain string value.
func PlainSecurityGroup(value string) SecurityGroup {
	return plainSecurityGroup(value)
}

// ImportedSecurityGroup returns a SecurityGroup that should be imported from a stack.
func ImportedSecurityGroup(name string) SecurityGroup {
	return importedSecurityGroup(name)
}

// plainSecurityGroup is a SecurityGroup rendered as a literal ID.
type plainSecurityGroup string

// RequiresImport returns false for a plain string SecurityGroup.
func (sg plainSecurityGroup) RequiresImport() bool {
	return false
}

// Value returns the plain string value of the SecurityGroup.
func (sg plainSecurityGroup) Value() string {
	return string(sg)
}

// importedSecurityGroup is a SecurityGroup resolved from another stack's export name.
type importedSecurityGroup string

// RequiresImport returns true for an imported SecurityGroup.
func (sg importedSecurityGroup) RequiresImport() bool {
	return true
}

// Value returns the name of the import that will be the value of the SecurityGroup.
func (sg importedSecurityGroup) Value() string {
	return string(sg)
}
// RuntimePlatformOpts holds configuration needed for Platform configuration.
type RuntimePlatformOpts struct {
	OS   string
	Arch string
}

// IsDefault returns true if the platform matches the default docker image platform of "linux/amd64".
func (p RuntimePlatformOpts) IsDefault() bool {
	return p.isEmpty() || (p.OS == OSLinux && p.Arch == ArchX86)
}

// Version returns the Fargate platform version based on the selected os family.
// Windows Server families are pinned to "1.0.0"; everything else uses "LATEST".
func (p RuntimePlatformOpts) Version() string {
	for _, family := range osFamiliesForPV100 {
		if family == p.OS {
			return "1.0.0"
		}
	}
	return "LATEST"
}

// isEmpty reports whether neither OS nor architecture was configured.
func (p RuntimePlatformOpts) isEmpty() bool {
	return p.OS == "" && p.Arch == ""
}
// S3ObjectLocation represents an object stored in an S3 bucket.
type S3ObjectLocation struct {
	Bucket string // Name of the bucket.
	Key    string // Key of the object.
}

// WorkloadOpts holds optional data that can be provided to enable features in a workload stack template.
type WorkloadOpts struct {
	AppName            string
	EnvName            string
	WorkloadName       string
	SerializedManifest string // Raw manifest file used to deploy the workload.
	EnvVersion         string

	// Configuration for the main container.
	PortMappings []*PortMapping
	Variables    map[string]Variable
	Secrets      map[string]Secret
	EntryPoint   []string
	Command      []string

	// Additional options that are common between **all** workload templates.
	Tags                     map[string]string        // Used by App Runner workloads to tag App Runner service resources
	NestedStack              *WorkloadNestedStackOpts // Outputs from nested stacks such as the addons stack.
	AddonsExtraParams        string                   // Additional user defined Parameters for the addons stack.
	Sidecars                 []*SidecarOpts
	LogConfig                *LogConfigOpts
	Autoscaling              *AutoscalingOpts
	CapacityProviders        []*CapacityProviderStrategy
	DesiredCountOnSpot       *int
	Storage                  *StorageOpts
	Network                  NetworkOpts
	ExecuteCommand           *ExecuteCommandOpts
	Platform                 RuntimePlatformOpts
	DockerLabels             map[string]string
	DependsOn                map[string]string
	Publish                  *PublishOpts
	ServiceDiscoveryEndpoint string
	ALBEnabled               bool
	CredentialsParameter     string
	PermissionsBoundary      string

	// Additional options for service templates.
	WorkloadType            string
	HealthCheck             *ContainerHealthCheck
	HTTPTargetContainer     HTTPTargetContainer
	GracePeriod             *int64
	NLB                     *NetworkLoadBalancer
	ALBListener             *ALBListener
	DeploymentConfiguration DeploymentConfigurationOpts
	ServiceConnect          *ServiceConnect

	// Custom Resources backed by Lambda functions.
	CustomResources map[string]S3ObjectLocation

	// Additional options for job templates.
	ScheduleExpression string
	StateMachine       *StateMachineOpts

	// Additional options for request driven web service templates.
	StartCommand         *string
	EnableHealthCheck    bool
	Observability        ObservabilityOpts
	Private              bool
	AppRunnerVPCEndpoint *string
	Count                *string

	// Input needed for the custom resource that adds a custom domain to the service.
	Alias                *string
	AWSSDKLayer          *string
	AppDNSDelegationRole *string
	AppDNSName           *string

	// Additional options for worker service templates.
	Subscribe *SubscribeOpts

	// Additional options for static site template.
	AssetMappingFileBucket string
	AssetMappingFilePath   string
	StaticSiteAlias        string
}
// HealthCheckProtocol returns the protocol for the Load Balancer health check,
// or an empty string if it shouldn't be configured, defaulting to the
// target protocol. (which is what happens, even if it isn't documented as such :))
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-healthcheckprotocol
func (lr ALBListenerRule) HealthCheckProtocol() string {
	if lr.HTTPHealthCheck.Port == "443" {
		return "HTTPS"
	}
	if lr.TargetPort != "443" {
		// Leave the protocol unset so it defaults to the target protocol.
		return ""
	}
	if lr.HTTPHealthCheck.Port == "" {
		return "HTTPS"
	}
	// For backwards compatibility, only set HTTP if the target container is
	// https but the specified health check port is not.
	return "HTTP"
}
// ParseLoadBalancedWebService parses a load balanced web service's CloudFormation template
// with the specified data object and returns its content.
func (t *Template) ParseLoadBalancedWebService(data WorkloadOpts) (*Content, error) {
	return t.parseSvc(lbWebSvcTplName, data, withSvcParsingFuncs())
}

// ParseRequestDrivenWebService parses a request-driven web service's CloudFormation template
// with the specified data object and returns its content.
func (t *Template) ParseRequestDrivenWebService(data WorkloadOpts) (*Content, error) {
	return t.parseSvc(rdWebSvcTplName, data, withSvcParsingFuncs())
}

// ParseBackendService parses a backend service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseBackendService(data WorkloadOpts) (*Content, error) {
	return t.parseSvc(backendSvcTplName, data, withSvcParsingFuncs())
}

// ParseWorkerService parses a worker service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseWorkerService(data WorkloadOpts) (*Content, error) {
	return t.parseSvc(workerSvcTplName, data, withSvcParsingFuncs())
}

// ParseStaticSite parses a static site service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseStaticSite(data WorkloadOpts) (*Content, error) {
	return t.parseSvc(staticSiteTplName, data, withSvcParsingFuncs())
}

// ParseScheduledJob parses a scheduled job's Cloudformation Template
// with the specified data object and returns its content.
func (t *Template) ParseScheduledJob(data WorkloadOpts) (*Content, error) {
	return t.parseJob(scheduledJobTplName, data, withSvcParsingFuncs())
}
// parseSvc parses a service's CloudFormation template with the specified data object and returns its content.
// Service templates are read from the services template directory.
func (t *Template) parseSvc(name string, data interface{}, options ...ParseOption) (*Content, error) {
	return t.parseWkld(name, servicesDirName, data, options...)
}

// parseJob parses a job's Cloudformation template with the specified data object and returns its content.
// Job templates are read from the jobs template directory.
func (t *Template) parseJob(name string, data interface{}, options ...ParseOption) (*Content, error) {
	return t.parseWkld(name, jobDirName, data, options...)
}
// parseWkld renders the workload CloudFormation template identified by name under
// wkldDirName. It first parses the base template, attaches every shared workload
// partial as a nested parse tree, and finally executes the combined template with
// the given data object, returning the rendered content.
func (t *Template) parseWkld(name, wkldDirName string, data interface{}, options ...ParseOption) (*Content, error) {
	base, err := t.parse("base", fmt.Sprintf(fmtWkldCFTemplatePath, wkldDirName, name), options...)
	if err != nil {
		return nil, err
	}
	// Make every shared partial template available to the base template.
	for _, partial := range partialsWorkloadCFTemplateNames {
		nested, err := t.parse(partial, fmt.Sprintf(fmtWkldPartialsCFTemplatePath, partial), options...)
		if err != nil {
			return nil, err
		}
		if _, err := base.AddParseTree(partial, nested.Tree); err != nil {
			return nil, fmt.Errorf("add parse tree of %s to base template: %w", partial, err)
		}
	}
	var buf bytes.Buffer
	if err := base.Execute(&buf, data); err != nil {
		return nil, fmt.Errorf("execute template %s with data %v: %w", name, data, err)
	}
	return &Content{&buf}, nil
}
// withSvcParsingFuncs returns a ParseOption that registers the helper functions
// available to workload (service and job) CloudFormation templates.
func withSvcParsingFuncs() ParseOption {
	return func(t *template.Template) *template.Template {
		return t.Funcs(map[string]interface{}{
			"toSnakeCase":             ToSnakeCaseFunc,
			"hasSecrets":              hasSecrets,
			"fmtSlice":                FmtSliceFunc,
			"quoteSlice":              QuoteSliceFunc,
			"quote":                   strconv.Quote,
			"randomUUID":              randomUUIDFunc,
			"jsonMountPoints":         generateMountPointJSON,
			"jsonSNSTopics":           generateSNSJSON,
			"jsonQueueURIs":           generateQueueURIJSON,
			"envControllerParams":     envControllerParameters,
			"logicalIDSafe":           StripNonAlphaNumFunc,
			"wordSeries":              english.WordSeries,
			"pluralWord":              english.PluralWord,
			"contains":                contains,
			"requiresVPCConnector":    requiresVPCConnector,
			"strconvUint16":           StrconvUint16,
			"trancateWithHashPadding": trancateWithHashPadding,
		})
	}
}
// trancateWithHashPadding truncates s to at most max characters and appends the
// first paddingLength hex characters of the SHA-256 digest of the full string,
// so that two long strings sharing the same prefix still map to distinct values.
// Strings that already fit within max are returned unchanged.
// (Name kept as-is for template compatibility; it is registered as a template func.)
func trancateWithHashPadding(s string, max, paddingLength int) string {
	if len(s) <= max {
		return s
	}
	hash := fmt.Sprintf("%x", sha256.Sum256([]byte(s)))
	if paddingLength > len(hash) {
		// Guard against out-of-range padding requests instead of panicking.
		paddingLength = len(hash)
	}
	return s[:max] + hash[:paddingLength]
}
// hasSecrets reports whether the workload itself, or the addons nested stack,
// declares any secrets.
func hasSecrets(opts WorkloadOpts) bool {
	fromWorkload := len(opts.Secrets) > 0
	fromAddons := opts.NestedStack != nil && len(opts.NestedStack.SecretOutputs) > 0
	return fromWorkload || fromAddons
}
// randomUUIDFunc returns a new random (version 4) UUID string, or an error if
// generating the UUID fails. It is exposed to templates as "randomUUID".
func randomUUIDFunc() (string, error) {
	id, err := uuid.NewRandom()
	if err != nil {
		return "", fmt.Errorf("generate random uuid: %w", err)
	}
	// Return a literal nil on success instead of the (always-nil) err variable.
	return id.String(), nil
}
// envControllerParameters determines which parameters to include in the EnvController template.
// Each entry carries a trailing comma because the YAML template needs the
// separator; the EnvController custom resource resolves the final list.
func envControllerParameters(o WorkloadOpts) []string {
	params := []string{}
	switch o.WorkloadType {
	case "Load Balanced Web Service":
		if o.ALBEnabled {
			params = append(params, "ALBWorkloads,")
		}
		params = append(params, "Aliases,") // YAML needs the comma separator; resolved in EnvContr.
	case "Backend Service":
		if o.ALBEnabled {
			params = append(params, "InternalALBWorkloads,")
		}
	case "Request-Driven Web Service":
		if o.Private && o.AppRunnerVPCEndpoint == nil {
			params = append(params, "AppRunnerPrivateWorkloads,")
		}
	}
	if o.Network.SubnetsType == PrivateSubnetsPlacement {
		params = append(params, "NATWorkloads,")
	}
	if o.Storage != nil && o.Storage.requiresEFSCreation() {
		params = append(params, "EFSWorkloads,")
	}
	return params
}
// requiresVPCConnector reports whether a Request-Driven Web Service needs a VPC
// connector, i.e. when any subnet IDs or a subnet placement type is configured.
func requiresVPCConnector(o WorkloadOpts) bool {
	isRDWS := o.WorkloadType == "Request-Driven Web Service"
	hasPlacement := len(o.Network.SubnetIDs) > 0 || o.Network.SubnetsType != ""
	return isRDWS && hasPlacement
}
// contains reports whether s is an element of list.
func contains(list []string, s string) bool {
	for i := range list {
		if list[i] == s {
			return true
		}
	}
	return false
}
// ARN determines the arn for a topic using the SNSTopic name and account information.
func (t Topic) ARN() string {
	// The pattern is filled with: partition, region, account ID, app, env, svc, and the topic name.
	return fmt.Sprintf(snsARNPattern, t.Partition, t.Region, t.AccountID, t.App, t.Env, t.Svc, aws.StringValue(t.Name))
}
| 1,008 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"fmt"
"testing"
"github.com/spf13/afero"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/require"
)
// TestTemplate_ParseSvc renders the backend service base template against an
// in-memory filesystem and verifies that every workload partial is included
// exactly once, indented by two spaces.
func TestTemplate_ParseSvc(t *testing.T) {
	const (
		testSvcName = "backend"
	)
	testCases := map[string]struct {
		fs            func() afero.Fs
		wantedContent string
		wantedErr     error
	}{
		"renders all common templates": {
			fs: func() afero.Fs {
				// The base template simply includes each partial so the output
				// lists every partial's stub content in order.
				var baseContent string
				for _, name := range partialsWorkloadCFTemplateNames {
					baseContent += fmt.Sprintf(`{{include "%s" . | indent 2}}`+"\n", name)
				}
				fs := afero.NewMemMapFs()
				_ = fs.MkdirAll("templates/workloads/services/backend/", 0755)
				_ = fs.MkdirAll("templates/workloads/partials/cf/", 0755)
				_ = afero.WriteFile(fs, "templates/workloads/services/backend/cf.yml", []byte(baseContent), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/loggroup.yml", []byte("loggroup"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/envvars-container.yml", []byte("envvars-container"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/envvars-common.yml", []byte("envvars-common"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/secrets.yml", []byte("secrets"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/executionrole.yml", []byte("executionrole"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/taskrole.yml", []byte("taskrole"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/workload-container.yml", []byte("workload-container"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/fargate-taskdef-base-properties.yml", []byte("fargate-taskdef-base-properties"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/service-base-properties.yml", []byte("service-base-properties"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/servicediscovery.yml", []byte("servicediscovery"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/addons.yml", []byte("addons"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/sidecars.yml", []byte("sidecars"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/logconfig.yml", []byte("logconfig"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/autoscaling.yml", []byte("autoscaling"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/state-machine-definition.json.yml", []byte("state-machine-definition"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/eventrule.yml", []byte("eventrule"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/state-machine.yml", []byte("state-machine"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/efs-access-point.yml", []byte("efs-access-point"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/https-listener.yml", []byte("https-listener"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/http-listener.yml", []byte("http-listener"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/env-controller.yml", []byte("env-controller"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/mount-points.yml", []byte("mount-points"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/variables.yml", []byte("variables"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/volumes.yml", []byte("volumes"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/image-overrides.yml", []byte("image-overrides"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/instancerole.yml", []byte("instancerole"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/accessrole.yml", []byte("accessrole"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/publish.yml", []byte("publish"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/subscribe.yml", []byte("subscribe"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/nlb.yml", []byte("nlb"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/vpc-connector.yml", []byte("vpc-connector"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/alb.yml", []byte("alb"), 0644)
				_ = afero.WriteFile(fs, "templates/workloads/partials/cf/rollback-alarms.yml", []byte("rollback-alarms"), 0644)
				return fs
			},
			wantedContent: `  loggroup
  envvars-container
  envvars-common
  secrets
  executionrole
  taskrole
  workload-container
  fargate-taskdef-base-properties
  service-base-properties
  servicediscovery
  addons
  sidecars
  logconfig
  autoscaling
  eventrule
  state-machine
  state-machine-definition
  efs-access-point
  https-listener
  http-listener
  env-controller
  mount-points
  variables
  volumes
  image-overrides
  instancerole
  accessrole
  publish
  subscribe
  nlb
  vpc-connector
  alb
  rollback-alarms
`,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// GIVEN
			tpl := &Template{
				fs: &mockFS{tc.fs()},
			}
			// WHEN
			c, err := tpl.parseSvc(testSvcName, nil)

			if tc.wantedErr != nil {
				require.Contains(t, err.Error(), tc.wantedErr.Error())
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.wantedContent, c.String())
			}
		})
	}
}
// TestHasSecrets verifies that hasSecrets detects secrets declared directly on
// the workload as well as secrets exported by the addons nested stack.
func TestHasSecrets(t *testing.T) {
	testCases := map[string]struct {
		in     WorkloadOpts
		wanted bool
	}{
		"nil secrets": {
			in:     WorkloadOpts{},
			wanted: false,
		},
		"no secrets": {
			in: WorkloadOpts{
				Secrets: map[string]Secret{},
			},
			wanted: false,
		},
		"service has secrets": {
			in: WorkloadOpts{
				Secrets: map[string]Secret{
					"hello": SecretFromPlainSSMOrARN("world"),
				},
			},
			wanted: true,
		},
		"nested has secrets": {
			in: WorkloadOpts{
				NestedStack: &WorkloadNestedStackOpts{
					SecretOutputs: []string{"MySecretArn"},
				},
			},
			wanted: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, hasSecrets(tc.in))
		})
	}
}
// TestRuntimePlatformOpts_Version verifies the Fargate platform version chosen
// for each OS/architecture combination: LATEST for Linux, 1.0.0 for Windows.
func TestRuntimePlatformOpts_Version(t *testing.T) {
	testCases := map[string]struct {
		in       RuntimePlatformOpts
		wantedPV string
	}{
		"should return LATEST for on empty platform": {
			wantedPV: "LATEST",
		},
		"should return LATEST for linux containers": {
			in: RuntimePlatformOpts{
				OS:   "LINUX",
				Arch: "X86_64",
			},
			wantedPV: "LATEST",
		},
		"should return 1.0.0 for windows containers 2019 Core": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2019_CORE",
				Arch: "X86_64",
			},
			wantedPV: "1.0.0",
		},
		"should return 1.0.0 for windows containers 2019 Full": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2019_FULL",
				Arch: "X86_64",
			},
			wantedPV: "1.0.0",
		},
		"should return 1.0.0 for windows containers 2022 Core": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2022_CORE",
				Arch: "X86_64",
			},
			wantedPV: "1.0.0",
		},
		"should return 1.0.0 for windows containers 2022 Full": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2022_FULL",
				Arch: "X86_64",
			},
			wantedPV: "1.0.0",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wantedPV, tc.in.Version())
		})
	}
}
// TestRuntimePlatformOpts_IsDefault verifies that only the empty platform and
// linux/x86_64 count as the default runtime platform.
func TestRuntimePlatformOpts_IsDefault(t *testing.T) {
	testCases := map[string]struct {
		in     RuntimePlatformOpts
		wanted bool
	}{
		"should return true on empty platform": {
			wanted: true,
		},
		"should return true for linux/x86_64": {
			in: RuntimePlatformOpts{
				OS:   "LINUX",
				Arch: "X86_64",
			},
			wanted: true,
		},
		"should return false for windows containers 2019 Core": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2019_CORE",
				Arch: "X86_64",
			},
		},
		"should return false for windows containers 2019 Full": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2019_FULL",
				Arch: "X86_64",
			},
		},
		"should return false for windows containers 2022 Core": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2022_CORE",
				Arch: "X86_64",
			},
		},
		"should return false for windows containers 2022 Full": {
			in: RuntimePlatformOpts{
				OS:   "WINDOWS_SERVER_2022_FULL",
				Arch: "X86_64",
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.wanted, tc.in.IsDefault())
		})
	}
}
// Plain SSM parameter names or secret ARNs are injected verbatim: no !Sub and no !ImportValue.
func TestPlainSSMOrSecretARN_RequiresSub(t *testing.T) {
	require.False(t, plainSSMOrSecretARN{}.RequiresSub(), "plain SSM Parameter Store or secret ARNs do not require !Sub")
}

func TestPlainSSMOrSecretARN_RequiresImport(t *testing.T) {
	require.False(t, plainSSMOrSecretARN{}.RequiresImport(), "plain SSM Parameter Store or secret ARNs do not require !ImportValue")
}

func TestPlainSSMOrSecretARN_ValueFrom(t *testing.T) {
	require.Equal(t, "/github/token", SecretFromPlainSSMOrARN("/github/token").ValueFrom())
}

// Secrets imported from another stack are resolved through !ImportValue.
func TestImportedSSMOrSecretARN_RequiresSub(t *testing.T) {
	require.False(t, importedSSMorSecretARN{}.RequiresSub(), "imported SSM Parameter Store or secret ARNs do not require !Sub")
}

func TestImportedSSMOrSecretARN_RequiresImport(t *testing.T) {
	require.True(t, importedSSMorSecretARN{}.RequiresImport(), "imported SSM Parameter Store or secret ARNs requires !ImportValue")
}

func TestImportedSSMOrSecretARN_ValueFrom(t *testing.T) {
	require.Equal(t, "stack-SSMGHTokenName", SecretFromImportedSSMOrARN("stack-SSMGHTokenName").ValueFrom())
}

// SecretsManager names must be expanded to a full ARN via !Sub.
func TestSecretsManagerName_RequiresSub(t *testing.T) {
	require.True(t, secretsManagerName{}.RequiresSub(), "secrets referring to a SecretsManager name need to be expanded to a full ARN")
}

func TestSecretsManagerName_RequiresImport(t *testing.T) {
	require.False(t, secretsManagerName{}.RequiresImport(), "secrets referring to a SecretsManager name do not require !ImportValue")
}

func TestSecretsManagerName_Service(t *testing.T) {
	require.Equal(t, "secretsmanager", secretsManagerName{}.Service())
}

func TestSecretsManagerName_ValueFrom(t *testing.T) {
	require.Equal(t, "secret:aes128-1a2b3c", SecretFromSecretsManager("aes128-1a2b3c").ValueFrom())
}
// TestALBListenerRule_HealthCheckProtocol verifies the protocol selection rules:
// explicit 443 health check wins, target port 443 defaults to HTTPS, and a
// non-443 health check on an HTTPS target falls back to HTTP.
func TestALBListenerRule_HealthCheckProtocol(t *testing.T) {
	testCases := map[string]struct {
		opts     ALBListenerRule
		expected string
	}{
		"target port 80, health check port unset": {
			opts: ALBListenerRule{
				TargetPort: "80",
			},
		},
		"target port 80, health check port 443": {
			opts: ALBListenerRule{
				TargetPort: "80",
				HTTPHealthCheck: HTTPHealthCheckOpts{
					Port: "443",
				},
			},
			expected: "HTTPS",
		},
		"target port 443, health check port unset": {
			opts: ALBListenerRule{
				TargetPort: "443",
			},
			expected: "HTTPS",
		},
		"target port 443, health check port 80": {
			opts: ALBListenerRule{
				TargetPort: "443",
				HTTPHealthCheck: HTTPHealthCheckOpts{
					Port: "80",
				},
			},
			expected: "HTTP",
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, tc.opts.HealthCheckProtocol())
		})
	}
}
// TestEnvControllerParameters verifies which EnvController parameters are
// emitted for each workload type and feature combination (ALB, private
// placement, managed EFS storage, private App Runner services).
func TestEnvControllerParameters(t *testing.T) {
	tests := map[string]struct {
		opts     WorkloadOpts
		expected []string
	}{
		"LBWS": {
			opts: WorkloadOpts{
				WorkloadType: "Load Balanced Web Service",
			},
			expected: []string{"Aliases,"},
		},
		"LBWS with ALB": {
			opts: WorkloadOpts{
				WorkloadType: "Load Balanced Web Service",
				ALBEnabled:   true,
			},
			expected: []string{"ALBWorkloads,", "Aliases,"},
		},
		"LBWS with ALB and private placement": {
			opts: WorkloadOpts{
				WorkloadType: "Load Balanced Web Service",
				ALBEnabled:   true,
				Network: NetworkOpts{
					SubnetsType: PrivateSubnetsPlacement,
				},
			},
			expected: []string{"ALBWorkloads,", "Aliases,", "NATWorkloads,"},
		},
		"LBWS with ALB, private placement, and storage": {
			opts: WorkloadOpts{
				WorkloadType: "Load Balanced Web Service",
				ALBEnabled:   true,
				Network: NetworkOpts{
					SubnetsType: PrivateSubnetsPlacement,
				},
				Storage: &StorageOpts{
					ManagedVolumeInfo: &ManagedVolumeCreationInfo{
						Name: aws.String("hi"),
					},
				},
			},
			expected: []string{"ALBWorkloads,", "Aliases,", "NATWorkloads,", "EFSWorkloads,"},
		},
		"Backend": {
			opts: WorkloadOpts{
				WorkloadType: "Backend Service",
			},
			expected: []string{},
		},
		"Backend with ALB": {
			opts: WorkloadOpts{
				WorkloadType: "Backend Service",
				ALBEnabled:   true,
			},
			expected: []string{"InternalALBWorkloads,"},
		},
		"RDWS": {
			opts: WorkloadOpts{
				WorkloadType: "Request-Driven Web Service",
			},
			expected: []string{},
		},
		"private RDWS": {
			opts: WorkloadOpts{
				WorkloadType: "Request-Driven Web Service",
				Private:      true,
			},
			expected: []string{"AppRunnerPrivateWorkloads,"},
		},
		"private RDWS with imported VPC Endpoint": {
			opts: WorkloadOpts{
				WorkloadType:         "Request-Driven Web Service",
				Private:              true,
				AppRunnerVPCEndpoint: aws.String("vpce-1234"),
			},
			expected: []string{},
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, envControllerParameters(tc.opts))
		})
	}
}
// TestRollingUpdateRollbackConfig_TruncateAlarmName verifies that alarm names
// are left intact when short and that each name component is truncated to 76
// characters when the combined name would exceed the CloudWatch limit.
func TestRollingUpdateRollbackConfig_TruncateAlarmName(t *testing.T) {
	testCases := map[string]struct {
		config      RollingUpdateRollbackConfig
		inApp       string
		inEnv       string
		inSvc       string
		inAlarmType string
		expected    string
	}{
		"with no need to truncate": {
			inApp:       "shortAppName",
			inEnv:       "shortEnvName",
			inSvc:       "shortSvcName",
			inAlarmType: "CopilotRollbackMemAlarm",
			expected:    "shortAppName-shortEnvName-shortSvcName-CopilotRollbackMemAlarm",
		},
		"with need to truncate at 76 chars per element": {
			inApp:       "12345678911234567892123456789312345678941234567895123456789612345678971234567898",
			inEnv:       "12345678911234567892123456789312345678941234567895123456789612345678971234567898",
			inSvc:       "12345678911234567892123456789312345678941234567895123456789612345678971234567898",
			inAlarmType: "CopilotRollbackCPUAlarm",
			expected:    "1234567891123456789212345678931234567894123456789512345678961234567897123456-1234567891123456789212345678931234567894123456789512345678961234567897123456-1234567891123456789212345678931234567894123456789512345678961234567897123456-CopilotRollbackCPUAlarm",
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, tc.config.TruncateAlarmName(tc.inApp, tc.inEnv, tc.inSvc, tc.inAlarmType))
		})
	}
}
// TestApplicationLoadBalancer_Aliases verifies that Aliases collects the
// aliases of all listener rules, de-duplicated, and returns nil when no rule
// declares any alias.
func TestApplicationLoadBalancer_Aliases(t *testing.T) {
	tests := map[string]struct {
		opts     ALBListener
		expected []string
	}{
		"LBWS with multiple listener rules having multiple aliases each": {
			opts: ALBListener{
				Rules: []ALBListenerRule{
					{
						Aliases: []string{
							"testAlias1",
							"testAlias2",
						},
					},
					{
						Aliases: []string{
							"testAlias1",
							"testAlias3",
						},
					},
				},
			},
			expected: []string{"testAlias1", "testAlias2", "testAlias3"},
		},
		"LBWS having no aliases": {
			opts: ALBListener{
				Rules: []ALBListenerRule{{}, {}},
			},
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, tc.opts.Aliases())
		})
	}
}
// Test_trancateWithHashPadding verifies that short strings pass through
// unchanged and long strings are cut at max characters with a hex-hash suffix
// of the requested length appended.
func Test_trancateWithHashPadding(t *testing.T) {
	tests := map[string]struct {
		inString  string
		inMax     int
		inPadding int
		expected  string
	}{
		"less than max": {
			inString:  "mockString",
			inMax:     64,
			inPadding: 0,
			expected:  "mockString",
		},
		"truncate with hash padding": {
			inString:  "longapp-longenv-longsvc",
			inMax:     10,
			inPadding: 6,
			expected:  "longapp-lo7693be",
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, trancateWithHashPadding(tc.inString, tc.inMax, tc.inPadding))
		})
	}
}
| 534 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package artifactpath holds functions to generate the S3 object path for artifacts.
package artifactpath
import (
"crypto/sha256"
"fmt"
"path"
"path/filepath"
)
// Directory name components used to build S3 object keys for Copilot-managed artifacts.
const (
	s3ArtifactDirName           = "manual"
	s3TemplateDirName           = "templates"
	s3ArtifactAddonsDirName     = "addons"
	s3ArtifactAddonAssetDirName = "assets"
	s3ArtifactEnvFilesDirName   = "env-files"
	s3ScriptsDirName            = "scripts"
	s3CustomResourcesDirName    = "custom-resources"
	s3EnvironmentsAddonsDirName = "environments"
)
// MkdirSHA256 returns the object path "manual/<hash>/<key>", where <hash> is
// the hex-encoded SHA256 digest of content.
func MkdirSHA256(key string, content []byte) string {
	return path.Join(s3ArtifactDirName, fmt.Sprintf("%x", sha256.Sum256(content)), key)
}
// Addons returns the path to store addon artifact files with sha256 of the content.
// Example: manual/addons/key/sha.yml.
func Addons(key string, content []byte) string {
	sum := sha256.Sum256(content)
	return path.Join(s3ArtifactDirName, s3ArtifactAddonsDirName, key, fmt.Sprintf("%x.yml", sum))
}
// AddonAsset returns the path to store an addon asset file.
// The hash is precomputed by the caller from the asset's content.
// Example: manual/addons/frontend/assets/668e2b73ac.
func AddonAsset(workloadName, hash string) string {
	return path.Join(s3ArtifactDirName, s3ArtifactAddonsDirName, workloadName, s3ArtifactAddonAssetDirName, hash)
}
// EnvironmentAddons returns the path to store environment addon artifact files with sha256 of the content.
// Example: manual/addons/environments/sha.yml.
func EnvironmentAddons(content []byte) string {
	sum := sha256.Sum256(content)
	return path.Join(s3ArtifactDirName, s3ArtifactAddonsDirName, s3EnvironmentsAddonsDirName, fmt.Sprintf("%x.yml", sum))
}
// EnvironmentAddonAsset returns the path to store an addon asset file for an environment addon.
// The hash is precomputed by the caller from the asset's content.
// Example: manual/addons/environments/assets/668e2b73ac.
func EnvironmentAddonAsset(hash string) string {
	return path.Join(s3ArtifactDirName, s3ArtifactAddonsDirName, s3EnvironmentsAddonsDirName, s3ArtifactAddonAssetDirName, hash)
}
// CFNTemplate returns the path to store cloudformation templates with sha256 of the content.
// Example: manual/templates/key/sha.yml.
func CFNTemplate(key string, content []byte) string {
	sum := sha256.Sum256(content)
	return path.Join(s3ArtifactDirName, s3TemplateDirName, key, fmt.Sprintf("%x.yml", sum))
}
// EnvFiles returns the path to store an env file artifact with sha256 of the content.
// Example: manual/env-files/key/sha.env.
func EnvFiles(key string, content []byte) string {
	// use filepath.Base to prevent cryptic errors in the ecs agent for paths like "..\magic.env"
	return path.Join(s3ArtifactDirName, s3ArtifactEnvFilesDirName, filepath.Base(key), fmt.Sprintf("%x.env", sha256.Sum256(content)))
}
// CustomResource returns the path to store a custom resource with a sha256 of the contents of the file.
// Example: manual/scripts/custom-resources/key/sha.zip
func CustomResource(key string, zipFile []byte) string {
	sum := sha256.Sum256(zipFile)
	return path.Join(s3ArtifactDirName, s3ScriptsDirName, s3CustomResourcesDirName, key, fmt.Sprintf("%x.zip", sum))
}
| 72 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package artifactpath
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestCustomResource pins the object path layout for custom resource zip files;
// the hex digest is the SHA256 of empty content.
func TestCustomResource(t *testing.T) {
	require.Equal(t, "manual/scripts/custom-resources/envcontrollerfunction/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.zip", CustomResource("envcontrollerfunction", []byte("")))
}

// TestEnvironmentAddons pins the object path layout for environment addon templates.
func TestEnvironmentAddons(t *testing.T) {
	require.Equal(t, "manual/addons/environments/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.yml", EnvironmentAddons([]byte("")))
}

// TestEnvironmentAddonsAsset pins the object path layout for environment addon assets.
func TestEnvironmentAddonsAsset(t *testing.T) {
	require.Equal(t, "manual/addons/environments/assets/hash", EnvironmentAddonAsset("hash"))
}
| 23 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package diff provides functionalities to compare two YAML documents.
package diff
import (
"fmt"
"io"
"sort"
"gopkg.in/yaml.v3"
)
// Tree represents a difference tree between two YAML documents.
type Tree struct {
	root diffNode
}

// Write writes the string representation of the difference tree into w.
func (t Tree) Write(w io.Writer) error {
	tw := &treeWriter{t, w}
	return tw.write()
}
// diffNode is the interface to represent the difference between two *yaml.Node.
type diffNode interface {
	key() string          // The YAML key associated with the node; empty for the root and sequence items.
	newYAML() *yaml.Node  // The node's value in the new document; populated only on leaf nodes.
	oldYAML() *yaml.Node  // The node's value in the old document; populated only on leaf nodes.
	children() []diffNode // Child diff nodes; empty for leaf nodes.
}
// keyNode is a concrete implementation of a diffNode.
type keyNode struct {
	keyValue   string
	childNodes []diffNode // A list of non-empty pointers to the children nodes.
	oldV       *yaml.Node // Only populated for a leaf node (i.e. that has no child node).
	newV       *yaml.Node // Only populated for a leaf node (i.e. that has no child node).
}

// key returns the YAML key this diff node is associated with.
func (n *keyNode) key() string {
	return n.keyValue
}

// newYAML returns the value of the node in the new document, or nil.
func (n *keyNode) newYAML() *yaml.Node {
	return n.newV
}

// oldYAML returns the value of the node in the old document, or nil.
func (n *keyNode) oldYAML() *yaml.Node {
	return n.oldV
}

// children returns the nested diff nodes, or nil for a leaf node.
func (n *keyNode) children() []diffNode {
	return n.childNodes
}
// unchangedNode represents a run of consecutive sequence items that are
// identical between the two documents; only the number of items is recorded.
type unchangedNode struct {
	count int
}

// children always returns nil: an unchanged run has no nested diffs.
func (n *unchangedNode) children() []diffNode {
	return nil
}

// key always returns an empty string: unchanged runs have no key.
func (n *unchangedNode) key() string {
	return ""
}

// newYAML always returns nil: no value changed.
func (n *unchangedNode) newYAML() *yaml.Node {
	return nil
}

// oldYAML always returns nil: no value changed.
func (n *unchangedNode) oldYAML() *yaml.Node {
	return nil
}

// unchangedCount returns the number of consecutive unchanged items.
func (n *unchangedNode) unchangedCount() int {
	return n.count
}
// seqItemNode is a diff node that represents an item inside a YAML sequence.
type seqItemNode struct {
	keyNode
}

// From is the YAML document that another YAML document is compared against.
type From []byte
// ParseWithCFNOverriders constructs a diff tree that represent the differences of a YAML document against the From document with
// overriders designed for CFN documents, including:
// 1. An ignorer that ignores diffs under "Metadata.Manifest".
// 2. An overrider that is able to compare intrinsic functions with full/short form correctly.
func (from From) ParseWithCFNOverriders(to []byte) (Tree, error) {
	return from.Parse(to,
		// Skip any diff nested under the "Metadata.Manifest" key.
		&ignorer{
			curr: &ignoreSegment{
				key: "Metadata",
				next: &ignoreSegment{
					key: "Manifest",
				},
			},
		},
		// Compare intrinsic functions regardless of full or short form.
		&getAttConverter{},
		&intrinsicFuncMapTagConverter{})
}
// Parse constructs a diff tree that represent the differences of a YAML document against the From document.
// An empty Tree is returned when the two documents have no differences.
func (from From) Parse(to []byte, overriders ...overrider) (Tree, error) {
	var toNode, fromNode yaml.Node
	if err := yaml.Unmarshal(to, &toNode); err != nil {
		return Tree{}, fmt.Errorf("unmarshal current template: %w", err)
	}
	if err := yaml.Unmarshal(from, &fromNode); err != nil {
		return Tree{}, fmt.Errorf("unmarshal old template: %w", err)
	}
	var root diffNode
	var err error
	switch {
	// NOTE: If Kind is 0, it means the document is empty and nothing is unmarshalled.
	case fromNode.Kind == 0 && toNode.Kind == 0:
		return Tree{}, nil
	case fromNode.Kind == 0:
		root, err = parse(nil, &toNode, "", overriders...)
	case toNode.Kind == 0:
		root, err = parse(&fromNode, nil, "", overriders...)
	default:
		root, err = parse(&fromNode, &toNode, "", overriders...)
	}
	if err != nil {
		return Tree{}, err
	}
	// A nil root means the two documents are identical.
	if root == nil {
		return Tree{}, nil
	}
	return Tree{
		root: root,
	}, nil
}
// parse recursively compares the from and to nodes under the given key and
// returns the subtree of their differences, or nil if the nodes are identical.
// Overriders are consulted first and may take over parsing for nodes they match.
func parse(from, to *yaml.Node, key string, overriders ...overrider) (diffNode, error) {
	for _, overrider := range overriders {
		if overrider.match(from, to, key, overrider) {
			return overrider.parse(from, to, key, overrider)
		}
	}
	// Handle base cases: one side is missing, or the node kinds differ.
	if to == nil || from == nil || to.Kind != from.Kind {
		return &keyNode{
			keyValue: key,
			newV:     to,
			oldV:     from,
		}, nil
	}
	// Both nodes are scalars: compare their values directly.
	if isYAMLLeaf(to) && isYAMLLeaf(from) {
		if to.Value == from.Value {
			return nil, nil
		}
		return &keyNode{
			keyValue: key,
			newV:     to,
			oldV:     from,
		}, nil
	}
	// Both nodes are composite: recurse into sequences or mappings.
	var children []diffNode
	var err error
	switch {
	case to.Kind == yaml.SequenceNode && from.Kind == yaml.SequenceNode:
		children, err = parseSequence(from, to, overriders...)
	case to.Kind == yaml.DocumentNode && from.Kind == yaml.DocumentNode:
		fallthrough
	case to.Kind == yaml.MappingNode && from.Kind == yaml.MappingNode:
		children, err = parseMap(from, to, overriders...)
	default:
		return nil, fmt.Errorf("unknown combination of node kinds: %v, %v", to.Kind, from.Kind)
	}
	if err != nil {
		return nil, fmt.Errorf("parse YAML content with key %s: %w", key, err)
	}
	// No differing children means the subtrees are identical.
	if len(children) == 0 {
		return nil, nil
	}
	return &keyNode{
		keyValue:   key,
		childNodes: children,
	}, nil
}
// isYAMLLeaf reports whether node has no nested content, i.e. it is a scalar (leaf) node.
func isYAMLLeaf(node *yaml.Node) bool {
	return len(node.Content) == 0
}
// parseSequence compares two sequence nodes by computing the longest common
// subsequence (LCS) of their items; items outside the LCS are reported as
// insertions, deletions, or modifications.
func parseSequence(fromNode, toNode *yaml.Node, overriders ...overrider) ([]diffNode, error) {
	fromSeq, toSeq := make([]yaml.Node, len(fromNode.Content)), make([]yaml.Node, len(toNode.Content)) // NOTE: should be the same as calling `Decode`.
	for idx, v := range fromNode.Content {
		fromSeq[idx] = *v
	}
	for idx, v := range toNode.Content {
		toSeq[idx] = *v
	}
	type cachedEntry struct {
		node diffNode
		err  error
	}
	cachedDiff := make(map[string]cachedEntry)
	lcsIndices := longestCommonSubsequence(fromSeq, toSeq, func(idxFrom, idxTo int) bool {
		// Note: This function passed as `eq` should be a pure function. Therefore, its output is the same
		// given the same `idxFrom` and `idxTo`. Hence, it is not necessary to parse the nodes again.
		// In `lcs.go`, `eq` can be called twice on the same indices: once when computing LCS length, and
		// once when back-tracing to construct the LCS.
		if diff, ok := cachedDiff[cacheKey(idxFrom, idxTo)]; ok {
			return diff.err == nil && diff.node == nil
		}
		diff, err := parse(&(fromSeq[idxFrom]), &(toSeq[idxTo]), "", overriders...)
		if diff != nil { // NOTE: cache the diff only if a modification could have happened at this position.
			cachedDiff[cacheKey(idxFrom, idxTo)] = cachedEntry{
				node: diff,
				err:  err,
			}
		}
		return err == nil && diff == nil
	})
	// No difference if the two sequences have the same size and the LCS is the entire sequence.
	if len(fromSeq) == len(toSeq) && len(lcsIndices) == len(fromSeq) {
		return nil, nil
	}
	var children []diffNode
	var matchCount int
	inspector := newLCSStateMachine(fromSeq, toSeq, lcsIndices)
	for action := inspector.action(); action != actionDone; action = inspector.action() {
		switch action {
		case actionMatch:
			// Collapse consecutive matching items into a single unchangedNode.
			matchCount++
			if action := inspector.peek(); action != actionMatch {
				children = append(children, &unchangedNode{count: matchCount})
				matchCount = 0
			}
		case actionMod:
			// The item's diff was already computed (and cached) while building the LCS.
			diff := cachedDiff[cacheKey(inspector.fromIndex(), inspector.toIndex())]
			if diff.err != nil {
				return nil, diff.err
			}
			children = append(children, &seqItemNode{
				keyNode{
					keyValue:   diff.node.key(),
					childNodes: diff.node.children(),
					oldV:       diff.node.oldYAML(),
					newV:       diff.node.newYAML(),
				},
			})
		case actionDel:
			// Item exists only in the old sequence.
			item := inspector.fromItem()
			children = append(children, &seqItemNode{
				keyNode{
					oldV: &item,
				},
			})
		case actionInsert:
			// Item exists only in the new sequence.
			item := inspector.toItem()
			children = append(children, &seqItemNode{
				keyNode{
					newV: &item,
				},
			})
		}
		inspector.next()
	}
	return children, nil
}
// parseMap compares two mapping (or document) nodes key by key and returns a
// diff node for every key whose value differs between from and to.
func parseMap(from, to *yaml.Node, overriders ...overrider) ([]diffNode, error) {
	toMap, fromMap := make(map[string]yaml.Node), make(map[string]yaml.Node)
	if err := to.Decode(toMap); err != nil {
		return nil, err
	}
	if err := from.Decode(fromMap); err != nil {
		return nil, err
	}
	// Walk the union of keys in sorted order for deterministic output (avoids flaky unit tests).
	keys := unionOfKeys(toMap, fromMap)
	sort.Strings(keys)
	var children []diffNode
	for _, k := range keys {
		var fromValue, toValue *yaml.Node
		if v, ok := fromMap[k]; ok {
			fromValue = &v
		}
		if v, ok := toMap[k]; ok {
			toValue = &v
		}
		child, err := parse(fromValue, toValue, k, overriders...)
		if err != nil {
			return nil, err
		}
		if child != nil {
			children = append(children, child)
		}
	}
	return children, nil
}
// unionOfKeys returns the set of keys present in either a or b, in no
// particular order.
func unionOfKeys[T any](a, b map[string]T) []string {
	set := make(map[string]struct{}, len(a)+len(b))
	for k := range a {
		set[k] = struct{}{}
	}
	for k := range b {
		set[k] = struct{}{}
	}
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	return keys
}
// cacheKey formats a (from-index, to-index) pair into a unique map key used to
// cache diff results between two sequence items.
func cacheKey(inFrom, inTo int) string {
	return fmt.Sprintf("%d,%d", inFrom, inTo)
}
| 320 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package diff
import (
"errors"
"reflect"
"testing"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestFrom_Parse(t *testing.T) {
testCases := map[string]struct {
curr string
old string
wanted func() diffNode
wantedError error
}{
"add a map": {
curr: `Mary:
Height:
cm: 168
Weight:
kg: 52`,
old: `Mary:
Height:
cm: 168`,
wanted: func() diffNode {
/* sentinel -> Mary -> Weight: {new: "kg:52", old: nil} */
leaf := &keyNode{
keyValue: "Weight",
newV: yamlNode("kg: 52", t),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Mary",
childNodes: []diffNode{leaf},
},
},
}
},
},
"remove a map": {
curr: `Mary:
Height:
cm: 168`,
old: `Mary:
Height:
cm: 168
Weight:
kg: 52`,
wanted: func() diffNode {
/* sentinel -> Mary -> Weight: {new: nil, old: "kg:52"} */
leaf := &keyNode{
keyValue: "Weight",
oldV: yamlNode("kg: 52", t),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Mary",
childNodes: []diffNode{leaf},
},
},
}
},
},
"change keyed values": {
curr: `Mary:
Height:
cm: 168
CanFight: no
FavoriteWord: peace`,
old: `Mary:
Height:
cm: 190
CanFight: yes
FavoriteWord: muscle`,
wanted: func() diffNode {
/* sentinel
-> Mary
-> Height --> cm: {new: 168, old: 190}
-> CanFight: {new: no, old: yes}
-> FavoriteWord: {new: peace, old: muscle}
*/
leafCM := &keyNode{
keyValue: "cm",
newV: yamlScalarNode("168"),
oldV: yamlScalarNode("190"),
}
leafCanFight := &keyNode{
keyValue: "CanFight",
newV: yamlScalarNode("no"),
oldV: yamlScalarNode("yes"),
}
leafFavWord := &keyNode{
keyValue: "FavoriteWord",
newV: yamlScalarNode("peace"),
oldV: yamlScalarNode("muscle"),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Mary",
childNodes: []diffNode{
leafCanFight,
leafFavWord,
&keyNode{
keyValue: "Height",
childNodes: []diffNode{leafCM},
},
},
},
},
}
},
},
"list does not change": {
old: `Alphabet: [a,b,c,d]`,
curr: `Alphabet: [a,b,c,d]`,
wanted: func() diffNode {
return nil
},
},
"list reordered": {
old: `SizeRank: [bear,dog,cat,mouse]`,
curr: `SizeRank: [bear,cat,dog,mouse]`,
wanted: func() diffNode {
/* sentinel
-> SizeRank
-> 1 unchanged item (bear)
-> {old: dog, new: nil} // Deletion.
-> 1 unchanged item (cat)
-> {old: nil, new: dog} // Insertion.
-> 1 unchanged item (mouse)
*/
leaf1 := &seqItemNode{
keyNode{oldV: yamlScalarNode("dog")},
}
leaf2 := &seqItemNode{
keyNode{newV: yamlScalarNode("dog")},
}
unchangedBear, unchangedCat, unchangedMouse := &unchangedNode{count: 1}, &unchangedNode{count: 1}, &unchangedNode{count: 1}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "SizeRank",
childNodes: []diffNode{unchangedBear, leaf1, unchangedCat, leaf2, unchangedMouse},
},
},
}
},
},
"list with insertion": {
old: `DanceCompetition: [dog,bear,cat]`,
curr: `DanceCompetition: [dog,bear,mouse,cat]`,
wanted: func() diffNode {
/* sentinel
-> DanceCompetition
-> 2 unchanged items (dog, bear)
-> {old: nil, new: mouse} // Insertion.
-> 1 unchanged item (cat)
*/
leaf := &seqItemNode{
keyNode{newV: yamlScalarNode("mouse")},
}
unchangedDogBear, unchangedCat := &unchangedNode{count: 2}, &unchangedNode{count: 1}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "DanceCompetition",
childNodes: []diffNode{unchangedDogBear, leaf, unchangedCat},
},
},
}
},
},
"list with deletion": {
old: `PotatoChipCommittee: [dog,bear,cat,mouse]`,
curr: `PotatoChipCommittee: [dog,bear,mouse]`,
wanted: func() diffNode {
/* sentinel
-> PotatoChipCommittee
-> 2 unchanged items (dog, bear)
-> {old: cat, new: nil} // Deletion.
-> 1 unchanged item (mouse)
*/
leaf := &seqItemNode{
keyNode{oldV: yamlScalarNode("cat")},
}
unchangedDobBear, unchangedMouse := &unchangedNode{count: 2}, &unchangedNode{count: 1}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "PotatoChipCommittee",
childNodes: []diffNode{unchangedDobBear, leaf, unchangedMouse},
},
},
}
},
},
"list with a scalar value changed": {
old: `DogsFavoriteShape: [triangle,circle,rectangle]`,
curr: `DogsFavoriteShape: [triangle,ellipse,rectangle]`,
wanted: func() diffNode {
/* sentinel
-> DogsFavoriteShape
-> {old: circle, new: ellipse} // Modification.
*/
leaf := &seqItemNode{
keyNode{
oldV: yamlScalarNode("circle"),
newV: yamlScalarNode("ellipse"),
},
}
unchangedTri, unchangedRec := &unchangedNode{1}, &unchangedNode{1}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "DogsFavoriteShape",
childNodes: []diffNode{unchangedTri, leaf, unchangedRec},
},
},
}
},
},
"change a map to scalar": {
curr: `Mary:
Dialogue: "Said bear: 'I know I'm supposed to keep an eye on you"`,
old: `Mary:
Dialogue:
Bear: "I know I'm supposed to keep an eye on you"`,
wanted: func() diffNode {
/* sentinel -> Mary -> Dialogue --> {new: map, old: scalar} */
leafDialogue := &keyNode{
keyValue: "Dialogue",
newV: yamlScalarNode("Said bear: 'I know I'm supposed to keep an eye on you", withStyle(yaml.DoubleQuotedStyle)),
oldV: yamlNode("Bear: \"I know I'm supposed to keep an eye on you\"", t),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Mary",
childNodes: []diffNode{leafDialogue},
},
},
}
},
},
"change a list to scalar": {
curr: `Mary:
Dialogue: "Said bear: 'I know I'm supposed to keep an eye on you; Said Dog: 'ikr'"`,
old: `Mary:
Dialogue:
- Bear: "I know I'm supposed to keep an eye on you"
Tone: disappointed
- Dog: "ikr"
Tone: pleased`,
wanted: func() diffNode {
/* sentinel -> Mary -> Dialogue --> {new: list, old: scalar} */
leafDialogue := &keyNode{
keyValue: "Dialogue",
newV: yamlScalarNode("Said bear: 'I know I'm supposed to keep an eye on you; Said Dog: 'ikr'", withStyle(yaml.DoubleQuotedStyle)),
oldV: yamlNode(`- Bear: "I know I'm supposed to keep an eye on you"
Tone: disappointed
- Dog: "ikr"
Tone: pleased`, t),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Mary",
childNodes: []diffNode{leafDialogue},
},
},
}
},
},
"change a map to list": {
curr: `Mary:
Dialogue:
- Bear: "I know I'm supposed to keep an eye on you"
Tone: disappointed
- Dog: "ikr"
Tone: pleased`,
old: `Mary:
Dialogue:
Bear: (disappointed) "I know I'm supposed to keep an eye on you"
Dog: (pleased) "ikr"`,
wanted: func() diffNode {
/* sentinel -> Mary -> Dialogue --> {new: list, old: map} */
leafDialogue := &keyNode{
keyValue: "Dialogue",
newV: yamlNode(`- Bear: "I know I'm supposed to keep an eye on you"
Tone: disappointed
- Dog: "ikr"
Tone: pleased`, t),
oldV: yamlNode(`Bear: (disappointed) "I know I'm supposed to keep an eye on you"
Dog: (pleased) "ikr"`, t),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Mary",
childNodes: []diffNode{leafDialogue},
},
},
}
},
},
"no diff": {
curr: `Mary:
Height:
cm: 190
CanFight: yes
FavoriteWord: muscle`,
old: `Mary:
Height:
cm: 190
CanFight: yes
FavoriteWord: muscle`,
wanted: func() diffNode {
return nil
},
},
"error unmarshalling": {
curr: ` !!1?Mary:`,
wantedError: errors.New("unmarshal current template: yaml: found character that cannot start any token"),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
got, err := From(tc.old).Parse([]byte(tc.curr))
if tc.wantedError != nil {
require.EqualError(t, err, tc.wantedError.Error())
} else {
require.NoError(t, err)
require.True(t, equalTree(got, Tree{tc.wanted()}, t), "should get the expected tree")
}
})
}
}
func TestFrom_ParseWithCFNOverriders(t *testing.T) {
testCases := map[string]struct {
curr string
old string
wanted func() diffNode
wantedError error
}{
"diff in metadata manifest is ignored": {
old: `Description: CloudFormation environment template for infrastructure shared among Copilot workloads.
Metadata:
Version: v1.26.0
Manifest: I don't see any difference.`,
curr: `Description: CloudFormation environment template for infrastructure shared among Copilot workloads.
Metadata:
Version: v1.27.0
Manifest: There is definitely a difference.`,
wanted: func() diffNode {
/* sentinel -> Metadata -> Version*/
leaf := &keyNode{
keyValue: "Version",
oldV: yamlScalarNode("v1.26.0"),
newV: yamlScalarNode("v1.27.0"),
}
return &keyNode{
childNodes: []diffNode{
&keyNode{
keyValue: "Metadata",
childNodes: []diffNode{leaf},
},
},
}
},
},
"no diff between full/short form intrinsic func": {
curr: `Value: !Sub 'blah'
AvailabilityZone: !Select [0, !GetAZs '']
SecurityGroups:
- !GetAtt InternalLoadBalancerSecurityGroup.GroupId
StringsEquals:
iam:ResourceTag/copilot-application: !Sub '${AppName}'
Properties:
GroupDescription: !Join ['', [!Ref AppName, '-', !Ref EnvironmentName, EnvironmentSecurityGroup]]
`,
old: `Value:
Fn::Sub: 'blah'
AvailabilityZone:
Fn::Select:
- 0
- Fn::GetAZs: ""
SecurityGroups:
- Fn::GetAtt: InternalLoadBalancerSecurityGroup.GroupId
StringsEquals:
iam:ResourceTag/copilot-application:
Fn::Sub: ${AppName}
Properties:
GroupDescription:
Fn::Join:
- ""
- - Ref: AppName
- "-"
- Ref: EnvironmentName
- EnvironmentSecurityGroup
`,
wanted: func() diffNode {
return nil
},
},
"no diff": {
old: `Description: CloudFormation environment template for infrastructure shared among Copilot workloads.
Metadata:
Manifest: I don't see any difference.`,
curr: `Description: CloudFormation environment template for infrastructure shared among Copilot workloads.
Metadata:
Manifest: There is definitely a difference.`,
wanted: func() diffNode {
return nil
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
got, err := From(tc.old).ParseWithCFNOverriders([]byte(tc.curr))
if tc.wantedError != nil {
require.EqualError(t, err, tc.wantedError.Error())
} else {
require.NoError(t, err)
require.True(t, equalTree(got, Tree{tc.wanted()}, t), "should get the expected tree")
}
})
}
}
// nodeModifier mutates a yaml.Node in place; used by the test helpers below to
// tweak freshly built nodes.
type nodeModifier func(node *yaml.Node)

// withStyle returns a nodeModifier that sets the node's rendering style
// (e.g. yaml.DoubleQuotedStyle).
func withStyle(style yaml.Style) nodeModifier {
	return func(node *yaml.Node) {
		node.Style = style
	}
}
// yamlNode unmarshals content and returns the first content node of the
// resulting document, failing the test if the YAML is malformed.
func yamlNode(content string, t *testing.T) *yaml.Node {
	var node yaml.Node
	require.NoError(t, yaml.Unmarshal([]byte(content), &node), "should be able to unmarshal the wanted content")
	// The root YAML node is a document node. We want the first content node.
	return node.Content[0]
}

// yamlScalarNode builds a scalar yaml.Node holding value, applying any
// optional modifiers (such as withStyle).
func yamlScalarNode(value string, opts ...nodeModifier) *yaml.Node {
	node := &yaml.Node{
		Kind:  yaml.ScalarNode,
		Value: value,
	}
	for _, opt := range opts {
		opt(node)
	}
	return node
}
// equalTree reports whether two diff trees are structurally equal. Two trees
// with nil roots are considered equal.
func equalTree(a, b Tree, t *testing.T) bool {
	if a.root == nil || b.root == nil {
		return a.root == nil && b.root == nil
	}
	return equalSubTree(a.root, b.root, t)
}

// equalSubTree recursively compares two diff nodes: key, child count, and
// dynamic type first, then either the leaf values or all children pairwise in
// order.
func equalSubTree(a, b diffNode, t *testing.T) bool {
	if a == nil || b == nil {
		return a == nil && b == nil
	}
	if a.key() != b.key() || len(a.children()) != len(b.children()) || reflect.TypeOf(a) != reflect.TypeOf(b) {
		return false
	}
	if len(a.children()) == 0 {
		return equalLeaves(a, b, t)
	}
	for idx := range a.children() {
		if equal := equalSubTree(a.children()[idx], b.children()[idx], t); !equal {
			return false
		}
	}
	return true
}

// equalLeaves compares two leaf nodes by marshaling their old/new YAML values
// and comparing the serialized text. The unchecked type assertion on b is safe
// because equalSubTree has already verified a and b share the same dynamic type.
func equalLeaves(a, b diffNode, t *testing.T) bool {
	if _, ok := a.(*unchangedNode); ok {
		return a.(*unchangedNode).unchangedCount() == b.(*unchangedNode).unchangedCount()
	}
	aNew, err := yaml.Marshal(a.newYAML())
	require.NoError(t, err)
	bNew, err := yaml.Marshal(b.newYAML())
	require.NoError(t, err)
	aOld, err := yaml.Marshal(a.oldYAML())
	require.NoError(t, err)
	bOld, err := yaml.Marshal(b.oldYAML())
	require.NoError(t, err)
	return string(aNew) == string(bNew) && string(aOld) == string(bOld)
}
| 505 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package diff
import (
"fmt"
"strings"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"gopkg.in/yaml.v3"
)
// formatter renders a single diff node as human-readable text. Each
// implementation handles one node flavor (sequence item, keyed map entry, or
// the document root) and knows the indentation to use for its children.
type formatter interface {
	formatInsert(node diffNode) (string, error)
	formatDel(node diffNode) (string, error)
	formatMod(node diffNode) (string, error)
	formatPath(node diffNode) string
	nextIndent() int
}

// seqItemFormatter formats a diff node that is an item in a YAML sequence.
type seqItemFormatter struct {
	indent int // Number of leading spaces for this item's lines.
}

// formatDel renders the deleted item as a one-item YAML sequence, each line
// prefixed with the deletion marker and indented.
func (f *seqItemFormatter) formatDel(node diffNode) (string, error) {
	raw, err := yaml.Marshal(&yaml.Node{
		Kind:    yaml.SequenceNode,
		Tag:     "!!seq",
		Content: []*yaml.Node{node.oldYAML()},
	})
	if err != nil {
		return "", err
	}
	return processMultiline(string(raw), prefixByFn(prefixDel), indentByFn(f.indent)), nil
}

// formatInsert renders the inserted item as a one-item YAML sequence, each line
// prefixed with the addition marker and indented.
func (f *seqItemFormatter) formatInsert(node diffNode) (string, error) {
	raw, err := yaml.Marshal(&yaml.Node{
		Kind:    yaml.SequenceNode,
		Tag:     "!!seq",
		Content: []*yaml.Node{node.newYAML()},
	})
	if err != nil {
		return "", err
	}
	return processMultiline(string(raw), prefixByFn(prefixAdd), indentByFn(f.indent)), nil
}

// formatMod renders a modified scalar item as "- old -> new".
func (f *seqItemFormatter) formatMod(node diffNode) (string, error) {
	oldValue, newValue, err := marshalValues(node)
	if err != nil {
		return "", err
	}
	content := fmt.Sprintf("- %s -> %s", oldValue, newValue)
	return processMultiline(content, prefixByFn(prefixMod), indentByFn(f.indent)), nil
}

// formatPath renders the faint "- (changed item)" header line shown above a
// changed item's children.
func (f *seqItemFormatter) formatPath(node diffNode) string {
	return process(color.Faint.Sprint("- (changed item)"), prefixByFn(prefixMod), indentByFn(f.indent)) + "\n"
}

// nextIndent returns the indentation for this item's child lines.
func (f *seqItemFormatter) nextIndent() int {
	/* A seq item diff should look like:
	   - (item)
	       ~ Field1: a
	       + Field2: b
	   Where "~ Field1: a" and "+ Field2: b" are its children. The indentation should increase by len("- "), which is 2.
	*/
	return f.indent + 2
}
// keyedFormatter formats a diff node that is a keyed entry of a YAML mapping.
type keyedFormatter struct {
	indent int // Number of leading spaces for this entry's lines.
}
// marshalKeyedEntry renders a single "key: value" YAML mapping and applies the
// given diff prefix plus the formatter's indentation to every line. It is the
// shared implementation behind formatDel and formatInsert, which previously
// duplicated this logic.
func (f *keyedFormatter) marshalKeyedEntry(key string, value *yaml.Node, prefix string) (string, error) {
	raw, err := yaml.Marshal(&yaml.Node{
		Kind: yaml.MappingNode,
		Tag:  "!!map",
		Content: []*yaml.Node{
			{
				Kind:  yaml.ScalarNode,
				Tag:   "!!str",
				Value: key,
			},
			value,
		},
	})
	if err != nil {
		return "", err
	}
	return processMultiline(string(raw), prefixByFn(prefix), indentByFn(f.indent)), nil
}

// formatDel renders the deleted entry as "key: <old value>" with the deletion marker.
func (f *keyedFormatter) formatDel(node diffNode) (string, error) {
	return f.marshalKeyedEntry(node.key(), node.oldYAML(), prefixDel)
}

// formatInsert renders the inserted entry as "key: <new value>" with the addition marker.
func (f *keyedFormatter) formatInsert(node diffNode) (string, error) {
	return f.marshalKeyedEntry(node.key(), node.newYAML(), prefixAdd)
}
// formatMod renders a modified keyed value as "key: old -> new".
func (f *keyedFormatter) formatMod(node diffNode) (string, error) {
	oldValue, newValue, err := marshalValues(node)
	if err != nil {
		return "", err
	}
	content := fmt.Sprintf("%s: %s -> %s", node.key(), oldValue, newValue)
	return processMultiline(content, prefixByFn(prefixMod), indentByFn(f.indent)), nil
}

// formatPath renders the "key:" header line shown above a changed entry's children.
func (f *keyedFormatter) formatPath(node diffNode) string {
	return process(node.key()+":"+"\n", prefixByFn(prefixMod), indentByFn(f.indent))
}

// nextIndent returns the indentation for this entry's child lines.
func (f *keyedFormatter) nextIndent() int {
	return f.indent + indentInc
}
// documentFormatter formats the root document node of a diff tree.
type documentFormatter struct{}

// formatMod is a no-op: the document root itself is never rendered as modified.
func (f *documentFormatter) formatMod(_ diffNode) (string, error) {
	return "", nil
}

// formatDel renders the entire old document, each line prefixed with the
// deletion marker at column zero.
func (f *documentFormatter) formatDel(node diffNode) (string, error) {
	raw, err := yaml.Marshal(node.oldYAML())
	if err != nil {
		return "", err
	}
	return processMultiline(string(raw), prefixByFn(prefixDel), indentByFn(0)), nil
}

// formatInsert renders the entire new document, each line prefixed with the
// addition marker at column zero.
func (f *documentFormatter) formatInsert(node diffNode) (string, error) {
	raw, err := yaml.Marshal(node.newYAML())
	if err != nil {
		return "", err
	}
	return processMultiline(string(raw), prefixByFn(prefixAdd), indentByFn(0)), nil
}

// formatPath is empty: the document root has no key or item marker.
func (f *documentFormatter) formatPath(_ diffNode) string {
	return ""
}

// nextIndent starts children of the document root at column zero.
func (f *documentFormatter) nextIndent() int {
	return 0
}
// marshalValues marshals a diff node's old and new values into strings with
// the trailing newline that yaml.Marshal appends stripped off.
// NOTE: yaml.Marshal handles YAML tags such as `!Ref` and `!Sub`.
// Rewritten to drop the non-idiomatic assignment-in-else pattern.
func marshalValues(node diffNode) (string, string, error) {
	marshal := func(v *yaml.Node) (string, error) {
		raw, err := yaml.Marshal(v)
		if err != nil {
			return "", err
		}
		return strings.TrimSuffix(string(raw), "\n"), nil
	}
	oldValue, err := marshal(node.oldYAML())
	if err != nil {
		return "", "", err
	}
	newValue, err := marshal(node.newYAML())
	if err != nil {
		return "", "", err
	}
	return oldValue, newValue, nil
}
// prefixByFn returns a line transform that prepends prefix and a single space.
func prefixByFn(prefix string) func(line string) string {
	return func(line string) string {
		return prefix + " " + line
	}
}

// indentByFn returns a line transform that left-pads the line with count spaces.
func indentByFn(count int) func(line string) string {
	pad := strings.Repeat(" ", count)
	return func(line string) string {
		return pad + line
	}
}
// process applies each transform in order to line and returns the result.
func process(line string, fn ...func(line string) string) string {
	result := line
	for _, transform := range fn {
		result = transform(result)
	}
	return result
}

// processMultiline strips trailing newlines from multiline, applies the
// transforms to each remaining line, and rejoins the lines with "\n".
func processMultiline(multiline string, fn ...func(line string) string) string {
	lines := strings.Split(strings.TrimRight(multiline, "\n"), "\n")
	for i, line := range lines {
		lines[i] = process(line, fn...)
	}
	return strings.Join(lines, "\n")
}
| 203 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package diff
import (
"gopkg.in/yaml.v3"
)
// action describes what the LCS state machine should do with the items it
// currently points at in the "from" and "to" sequences.
type action int

const (
	actionMatch  action = iota // Both items belong to the LCS; keep them.
	actionMod                  // The "from" item is modified into the "to" item.
	actionDel                  // The "from" item is deleted.
	actionInsert               // The "to" item is inserted.
	actionDone                 // Both sequences are fully consumed.
)

// tracker walks a slice while remembering the current position.
type tracker[T any] struct {
	index int
	data  []T
}
// newLCSStateMachine builds a state machine that walks two YAML sequences in
// lockstep, guided by the indices of their longest common subsequence.
func newLCSStateMachine(fromSeq, toSeq []yaml.Node, lcsIndices []lcsIndex) lcsStateMachine {
	return lcsStateMachine{
		from:       tracker[yaml.Node]{data: fromSeq},
		to:         tracker[yaml.Node]{data: toSeq},
		lcsIndices: tracker[lcsIndex]{data: lcsIndices},
	}
}

// lcsStateMachine emits, one step at a time, the edit actions (match, modify,
// delete, insert) that transform the "from" sequence into the "to" sequence.
type lcsStateMachine struct {
	from       tracker[yaml.Node]
	to         tracker[yaml.Node]
	lcsIndices tracker[lcsIndex]
	currAction action // The action most recently computed by action().
}
// action computes the edit action for the items currently under the cursors
// and records it in currAction so that next() knows how to advance.
func (sm *lcsStateMachine) action() action {
	var action action
	var (
		lcsDone  = sm.lcsIndices.index >= len(sm.lcsIndices.data)
		fromDone = sm.from.index >= len(sm.from.data)
		toDone   = sm.to.index >= len(sm.to.data)
	)
	if lcsDone {
		// No common items remain; everything left is pure insertion/deletion/modification.
		switch {
		case fromDone && toDone:
			action = actionDone
		case toDone:
			// Ex: "a,d,e" -> "a". When the lcsStateMachine moves to "d" in "from", both lcs and "to" are done, and "d,e" are considered deleted.
			action = actionDel
		case fromDone:
			// Ex: "a" -> "a,d,e". When the lcsStateMachine moves to "d" in "to", both lcs and "from" are done, and "d,e" are considered to be inserted.
			action = actionInsert
		default:
			// Ex: "a,b" -> "a,c". When the lcsStateMachine moves to (b,c), lcs is done, and b is modified into c.
			action = actionMod
		}
		sm.currAction = action
		return action
	}
	commonIdx := sm.lcsIndices.data[sm.lcsIndices.index]
	switch {
	case sm.from.index == commonIdx.inA && sm.to.index == commonIdx.inB:
		// Both cursors sit on the next common item.
		action = actionMatch
	case sm.from.index != commonIdx.inA && sm.to.index != commonIdx.inB:
		// Neither cursor is on a common item: treat as an in-place modification.
		action = actionMod
	case sm.from.index != commonIdx.inA:
		// Ex: "a,b,c" -> "c,1,2". When the lcsStateMachine is at (a,c) / (b,c), only "c" is common, a,b are considered deleted.
		action = actionDel
	default:
		// Ex: "a,b,c" -> "1,2,a". When the lcsStateMachine is at (a,1) and (a,2), only "a" is common, "1,2" are considered inserted.
		action = actionInsert
	}
	sm.currAction = action
	return action
}
// peek returns the action that would follow the current one without advancing
// the machine: it saves all cursors, steps forward, computes the next action,
// then restores the saved state.
func (sm *lcsStateMachine) peek() action {
	lcsIdxOld, fromIdxOld, toIdxOld, actionOld := sm.lcsIndices.index, sm.from.index, sm.to.index, sm.currAction
	sm.next()
	next := sm.action()
	sm.lcsIndices.index, sm.from.index, sm.to.index, sm.currAction = lcsIdxOld, fromIdxOld, toIdxOld, actionOld
	return next
}

// next advances the cursors according to the last computed action: a match
// consumes an LCS index plus one item from each sequence (note the
// fallthrough), a modification consumes one item from each sequence, a
// deletion consumes only from "from", and an insertion only from "to".
func (sm *lcsStateMachine) next() {
	switch sm.currAction {
	case actionMatch:
		sm.lcsIndices.index++
		fallthrough
	case actionMod:
		sm.from.index++
		sm.to.index++
	case actionDel:
		sm.from.index++
	case actionInsert:
		sm.to.index++
	}
}
// fromItem returns the item currently under the "from" cursor.
func (sm *lcsStateMachine) fromItem() yaml.Node {
	return sm.from.data[sm.from.index]
}

// toItem returns the item currently under the "to" cursor.
func (sm *lcsStateMachine) toItem() yaml.Node {
	return sm.to.data[sm.to.index]
}

// fromIndex returns the current position in the "from" sequence.
func (sm *lcsStateMachine) fromIndex() int {
	return sm.from.index
}

// toIndex returns the current position in the "to" sequence.
func (sm *lcsStateMachine) toIndex() int {
	return sm.to.index
}
| 119 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package diff
// eqFunc reports whether the item at index inA of the first list equals the
// item at index inB of the second list.
type eqFunc func(inA, inB int) bool

// lcsIndex is a pair of positions pointing at one common item: inA is its
// index in the first input list, inB its index in the second.
type lcsIndex struct {
	inA int
	inB int
}

// longestCommonSubsequence computes the longest common subsequence of two lists, and returns two lists that contain
// the positions of the common items in the input lists, respectively.
// When multiple correct answers exists, the function picks one of them deterministically.
// Example:
// input_a = ["a", "c", "b", "b", "d"], input_b = ["a", "B", "b", "c", "c", "d"]
// One LCS is ["a","c","d"].
// "a" is input_a[0] and input_b[0], "c" is in input_a[1] and input_b[3], "d" is input_a[4] and input_b[5].
// Therefore, the output will be [0,1,4], [0,3,5]
// Note that the parameter function `eq` is guaranteed to be called on every combination of a's and b's items.
func longestCommonSubsequence[T any](a []T, b []T, eq eqFunc) []lcsIndex {
	if len(a) == 0 || len(b) == 0 {
		return nil
	}
	// lcs[i][j] is the LCS length of a[i:] and b[j:]. The extra row and column
	// form the zero base case; make already zero-fills them, so the explicit
	// zeroing loops the previous version had were redundant and are removed.
	lcs := make([][]int, len(a)+1)
	for i := range lcs {
		lcs[i] = make([]int, len(b)+1)
	}
	// Compute the lengths of the LCS for all sub lists.
	for i := len(a) - 1; i >= 0; i-- {
		for j := len(b) - 1; j >= 0; j-- {
			switch {
			case eq(i, j):
				lcs[i][j] = 1 + lcs[i+1][j+1]
			case lcs[i+1][j] < lcs[i][j+1]:
				lcs[i][j] = lcs[i][j+1]
			default:
				lcs[i][j] = lcs[i+1][j]
			}
		}
	}
	// Backtrace to construct the LCS.
	var lcsIndices []lcsIndex
	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch {
		case eq(i, j):
			lcsIndices = append(lcsIndices, lcsIndex{
				inA: i,
				inB: j,
			})
			i++
			j++
		case lcs[i+1][j] < lcs[i][j+1]:
			j++
		default:
			i++
		}
	}
	return lcsIndices
}
| 71 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package diff
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
// Test_longestCommonSubsequence_string exercises longestCommonSubsequence on
// string slices, checking the reported positions of the common items in both
// input lists. Empty wantedA/wantedB means no common subsequence is expected.
func Test_longestCommonSubsequence_string(t *testing.T) {
	testCases := []struct {
		inA     []string
		inB     []string
		wantedA []int // Expected positions of the LCS items in inA.
		wantedB []int // Expected positions of the LCS items in inB.
	}{
		{
			inA:     []string{"a", "b", "c"},
			inB:     []string{"a", "b", "c"},
			wantedA: []int{0, 1, 2},
			wantedB: []int{0, 1, 2},
		},
		{
			inA:     []string{"a", "b", "c"},
			inB:     []string{"c"},
			wantedA: []int{2},
			wantedB: []int{0},
		},
		{
			inA:     []string{"a", "b", "c"},
			inB:     []string{"a", "B", "c"},
			wantedA: []int{0, 2},
			wantedB: []int{0, 2},
		},
		{
			inA:     []string{"a", "b", "c"},
			inB:     []string{"a", "B", "C"},
			wantedA: []int{0},
			wantedB: []int{0},
		},
		{
			inA: []string{"a", "c", "b", "b", "d"},
			inB: []string{"a", "B", "b", "c", "c", "d"},
			// NOTE: the wanted sequence here is a,b,d; however, a, c, d is also correct.
			wantedA: []int{0, 3, 4}, // NOTE: 0, 2, 4 is also correct.
			wantedB: []int{0, 2, 5},
		},
		{
			inA:     []string{"a", "b", "B", "B", "c", "d", "D", "d", "e", "f"},
			inB:     []string{"a", "B", "C", "d", "d", "e", "f"},
			wantedA: []int{0, 2, 5, 7, 8, 9},
			wantedB: []int{0, 1, 3, 4, 5, 6},
		},
		{
			inB: []string{},
		},
		{
			inA: []string{"a"},
		},
		{
			inA:     []string{"a"},
			inB:     []string{"a"},
			wantedA: []int{0},
			wantedB: []int{0},
		},
		{
			inA: []string{"a"},
			inB: []string{"b"},
		},
		{
			inA:     []string{"a", "b", "c", "c"},
			inB:     []string{"c"},
			wantedA: []int{2},
			wantedB: []int{0},
		},
	}
	for idx, tc := range testCases {
		t.Run(fmt.Sprintf("string case %v", idx), func(t *testing.T) {
			got := longestCommonSubsequence(tc.inA, tc.inB, func(inA, inB int) bool { return tc.inA[inA] == tc.inB[inB] })
			// Zip the wanted index pairs into lcsIndex values for comparison.
			var wanted []lcsIndex
			for idx := range tc.wantedA {
				wanted = append(wanted, lcsIndex{
					inA: tc.wantedA[idx],
					inB: tc.wantedB[idx],
				})
			}
			require.Equal(t, wanted, got)
		})
	}
}
func Test_longestCommonSubsequence_yamlNode(t *testing.T) {
testCases := map[string]struct {
inA string
inB string
mockEq eqFunc
wantedA []int
wantedB []int
}{
"map/reorder map field": { // The order of map fields should not matter.
inA: `- Sid: power
Action: '*'
Resources: '*'`,
inB: `- Sid: power
Resources: '*'
Action: '*'`,
mockEq: func(inA, inB int) bool {
return true
},
wantedA: []int{0},
wantedB: []int{0},
},
"map/change node scalar style": { // The style should not matter.
inA: `- Sid: 'power'`,
inB: `- Sid: "power"`,
mockEq: func(inA, inB int) bool {
return true
},
wantedA: []int{0},
wantedB: []int{0},
},
"map/not equal": {
inA: `- Sid: power
Action: '*'`,
inB: `- Sid: no power
Resources: '*'`,
mockEq: func(inA, inB int) bool {
return false
},
},
"map/reorder": {
inA: `- Sid: power
- Sid: less power`,
inB: `- Sid: less power
- Sid: power`,
mockEq: func(inA, inB int) bool {
return (inA == 0 && inB == 1) || (inA == 1 && inB == 0)
},
wantedA: []int{1}, // Note: wantedA, wantedB = [0], [1] is also correct.
wantedB: []int{0},
},
"scalar/change style": { // The style should not matter.
inA: `['a','b']`,
inB: `["a","b"]`,
mockEq: func(inA, inB int) bool {
return inA == inB
},
wantedA: []int{0, 1},
wantedB: []int{0, 1},
},
"scalar/mixed style": { // The style should not matter.
inA: `['a',"b"]`,
inB: `["a",'b']`,
mockEq: func(inA, inB int) bool {
return inA == inB
},
wantedA: []int{0, 1},
wantedB: []int{0, 1},
},
"scalar/not equal": { // The style should not matter.
inA: `[a,b,c,d]`,
inB: `[a,d]`,
mockEq: func(inA, inB int) bool {
return inA == 0 && inB == 0 || inA == 3 && inB == 1
},
wantedA: []int{0, 3},
wantedB: []int{0, 1},
},
"change list style": {
inA: `- a
- b
- c`,
inB: `[a,b,c]`,
mockEq: func(inA, inB int) bool {
return inA == inB
},
wantedA: []int{0, 1, 2}, // Note: wantedA, wantedB = [0], [1] is also correct.
wantedB: []int{0, 1, 2},
},
"change item kind": {
inA: `- a
- b
- c`,
inB: `- Sid: hey
- a
`,
mockEq: func(inA, inB int) bool {
return inA == 0 && inB == 1
},
wantedA: []int{0}, // Note: wantedA, wantedB = [0], [1] is also correct.
wantedB: []int{1},
},
}
for idx, tc := range testCases {
t.Run(idx, func(t *testing.T) {
var inANode, inBNode []yaml.Node
require.NoError(t, yaml.Unmarshal([]byte(tc.inA), &inANode))
require.NoError(t, yaml.Unmarshal([]byte(tc.inB), &inBNode))
got := longestCommonSubsequence(inANode, inBNode, tc.mockEq)
var wanted []lcsIndex
for idx := range tc.wantedA {
wanted = append(wanted, lcsIndex{
inA: tc.wantedA[idx],
inB: tc.wantedB[idx],
})
}
require.Equal(t, wanted, got)
})
}
}
| 218 |
copilot-cli | aws | Go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package diff
import (
"fmt"
"strings"
"gopkg.in/yaml.v3"
)
// overrider overrides the parsing behavior between two yaml nodes under certain keys.
type overrider interface {
	match(from, to *yaml.Node, key string, overrider overrider) bool
	parse(from, to *yaml.Node, key string, overrider overrider) (diffNode, error)
}

// ignoreSegment is one key of an ignored key path, linked to the next segment.
type ignoreSegment struct {
	key  string
	next *ignoreSegment
}

// ignorer ignores the diff between two yaml nodes under specified key paths.
type ignorer struct {
	curr *ignoreSegment // The path segment the ignorer currently expects to see.
}
// match returns true if the difference between the from and to at the key should be ignored.
// It returns true only when key completes the ignored path; when key matches an
// intermediate segment, the ignorer advances to the next segment and returns false.
// NOTE(review): curr is advanced but never reset, so an ignorer appears to be
// single-use along one descent of the tree — confirm against callers.
func (m *ignorer) match(_, _ *yaml.Node, key string, _ overrider) bool {
	if key != m.curr.key {
		return false
	}
	if m.curr.next == nil {
		return true
	}
	m.curr = m.curr.next
	return false
}

// parse is a no-op for an ignorer: the matched subtree produces no diff node.
func (m *ignorer) parse(_, _ *yaml.Node, _ string, _ overrider) (diffNode, error) {
	return nil, nil
}
// Check https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference.html for
// a complete list of intrinsic functions. Some are not included here as they do not need an overrider.
var (
	exists = struct{}{}
	// intrinsicFunctionFullNames are the full-form keys of CloudFormation
	// intrinsic functions as they appear in mapping nodes, e.g. "Fn::Sub: x".
	intrinsicFunctionFullNames = map[string]struct{}{
		"Ref":             exists,
		"Fn::Base64":      exists,
		"Fn::Cidr":        exists,
		"Fn::FindInMap":   exists,
		"Fn::GetAtt":      exists,
		"Fn::GetAZs":      exists,
		"Fn::ImportValue": exists,
		"Fn::Join":        exists,
		"Fn::Select":      exists,
		"Fn::Split":       exists,
		"Fn::Sub":         exists,
		"Fn::Transform":   exists,
		// Condition functions.
		"Condition":  exists,
		"Fn::And":    exists,
		"Fn::Equals": exists,
		"Fn::If":     exists,
		"Fn::Not":    exists,
		"Fn::Or":     exists,
	}
	// intrinsicFunctionShortNames are the short-form YAML tags of the same
	// functions. Tags always start with "!" (intrinsicFuncName trims that
	// prefix), so every entry must carry it.
	intrinsicFunctionShortNames = map[string]struct{}{
		"!Ref":         exists,
		"!Base64":      exists,
		"!Cidr":        exists,
		"!FindInMap":   exists,
		"!GetAtt":      exists,
		"!GetAZs":      exists,
		"!ImportValue": exists,
		"!Join":        exists,
		"!Select":      exists,
		"!Split":       exists,
		"!Sub":         exists,
		// BUGFIX: was "Transform" (no "!"), which could never match a YAML tag;
		// the short form of Fn::Transform is "!Transform".
		"!Transform": exists,
		// Condition functions.
		"!Condition": exists,
		"!And":       exists,
		"!Equals":    exists,
		"!If":        exists,
		"!Not":       exists,
		"!Or":        exists,
	}
)
// intrinsicFuncMatcher matches intrinsic function nodes.
type intrinsicFuncMatcher struct{}

// match returns true if from and to node represent the same intrinsic function.
// Example1: "!Ref" and "Ref:" will return true.
// Example2: "!Ref" and "!Ref" will return true.
// Example3: "!Ref" and "Fn::GetAtt:" will return false because they are different intrinsic functions.
// Example4: "!Magic" and "Fn::Magic" will return false because they are not intrinsic functions.
func (_ *intrinsicFuncMatcher) match(from, to *yaml.Node, _ string, _ overrider) bool {
	if from == nil || to == nil {
		return false
	}
	// intrinsicFuncName returns "" for non-intrinsic nodes, so both sides must
	// be recognized functions and share the same name.
	fromFunc, toFunc := intrinsicFuncName(from), intrinsicFuncName(to)
	return fromFunc != "" && toFunc != "" && fromFunc == toFunc
}
// intrinsicFuncMapTagConverter matches and parses two intrinsic function nodes written in different form (full/short).
type intrinsicFuncMapTagConverter struct {
	intrinsicFunc intrinsicFuncMatcher
}

// match returns true if from and to node represent the same intrinsic function written in different (full/short) form.
// Example1: "!Ref" and "Ref:" will return true.
// Example2: "!Ref" and "!Ref" will return false because they are written in the same form (i.e. short).
// Example3: "!Ref" and "Fn::GetAtt:" will return false because they are different intrinsic functions.
// For more on intrinsic functions and full/short forms, read https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ToJsonString.html.
func (converter *intrinsicFuncMapTagConverter) match(from, to *yaml.Node, key string, overrider overrider) bool {
	if !converter.intrinsicFunc.match(from, to, key, overrider) {
		return false
	}
	// Exactly one of from and to is full form (full form is a mapping node;
	// short form is a tagged scalar/sequence).
	return (from.Kind == yaml.MappingNode || to.Kind == yaml.MappingNode) && (from.Kind != to.Kind)
}
// parse compares two intrinsic function nodes written in different form (full vs. short).
// When the inputs to the intrinsic functions have different data types, parse assumes that no type conversion is needed
// for correct comparison.
// E.g. given "!Func: [1,2]" and "Fn::Func: '1,2'", parse assumes that comparing [1,2] with "1,2" produces the desired result.
// Note that this does not hold for "GetAtt" function: "!GetAtt: [1,2]" and "!GetAtt: 1.2" should be considered the same.
// parse assumes that from and to are matched by intrinsicFuncMapTagConverter.
func (*intrinsicFuncMapTagConverter) parse(from, to *yaml.Node, key string, overrider overrider) (diffNode, error) {
	var diff diffNode
	var err error
	if from.Kind == yaml.MappingNode {
		// The full form mapping node always contain only one child node. The second element in `Content` is the
		// value of the child node. Read https://www.efekarakus.com/2020/05/30/deep-dive-go-yaml-cfn.html.
		diff, err = parse(from.Content[1], stripTag(to), from.Content[0].Value, overrider)
	} else {
		// Mirror case: to is the full-form node; compare its value against the
		// short-form from with its tag stripped.
		diff, err = parse(stripTag(from), to.Content[1], to.Content[0].Value, overrider)
	}
	if diff == nil {
		return nil, err
	}
	// Wrap the function-level diff under the original key.
	return &keyNode{
		keyValue:   key,
		childNodes: []diffNode{diff},
	}, nil
}
// getAttConverter matches and parses two YAML nodes that calls the intrinsic function "GetAtt".
// Unlike intrinsicFuncMapTagConverter, getAttConverter does not require "from" and "to" to be written in different form.
// The input to "GetAtt" could be either a sequence or a scalar. All the followings are valid and should be considered equal.
// Fn::GetAtt: LogicalID.Att.SubAtt, Fn::GetAtt: [LogicalID, Att.SubAtt], !GetAtt LogicalID.Att.SubAtt, !GetAtt [LogicalID, Att.SubAtt].
type getAttConverter struct {
	intrinsicFuncMapTagConverter
}
// match returns true if both from node and to node are calling the "GetAtt" intrinsic function.
// "GetAtt" only accepts either sequence or scalar, therefore match returns false if either of from and to has invalid
// input node to "GetAtt".
// Example1: "!GetAtt" and "!GetAtt" returns true.
// Example2: "!GetAtt" and "Fn::GetAtt" returns true.
// Example3: "!Ref" and "!GetAtt" returns false.
// Example4: "!GetAtt [a,b]" and "Fn::GetAtt: a:b" returns false because the input type is wrong.
func (converter *getAttConverter) match(from, to *yaml.Node, key string, overrider overrider) bool {
	if !converter.intrinsicFunc.match(from, to, key, overrider) {
		return false
	}
	if intrinsicFuncName(from) != "GetAtt" {
		return false
	}
	// Unwrap full-form nodes to get at the function's input value.
	fromValue, toValue := from, to
	if from.Kind == yaml.MappingNode {
		// A valid full-form intrinsic function always contain a child node.
		// This must be valid because it has passed `converter.intrinsicFunc.match`.
		fromValue = from.Content[1]
	}
	if to.Kind == yaml.MappingNode {
		toValue = to.Content[1]
	}
	// GetAtt accepts only a scalar ("ID.Att") or a sequence ([ID, Att]).
	return (fromValue.Kind == yaml.ScalarNode || fromValue.Kind == yaml.SequenceNode) && (toValue.Kind == yaml.ScalarNode || toValue.Kind == yaml.SequenceNode)
}
// parse compares two nodes that call the "GetAtt" function. Both from and to can be written in either full or short form.
// parse assumes that from and to are already matched by getAttConverter.
func (converter *getAttConverter) parse(from, to *yaml.Node, key string, overrider overrider) (diffNode, error) {
	// arg unwraps the input node passed to GetAtt.
	arg := func(n *yaml.Node) *yaml.Node {
		if n.Kind == yaml.MappingNode {
			return n.Content[1] // A valid full-form intrinsic function always contains a child node.
		}
		return n
	}
	fromValue, toValue := arg(from), arg(to)
	// Normalize both sides to the same node kind before diffing: when exactly one side is a
	// scalar ("LogicalID.Attr"), rewrite it as the equivalent sequence ([LogicalID, Attr]).
	if fromValue.Kind != toValue.Kind {
		var err error
		if fromValue.Kind == yaml.ScalarNode {
			fromValue, err = getAttScalarToSeq(fromValue)
		} else if toValue.Kind == yaml.ScalarNode {
			toValue, err = getAttScalarToSeq(toValue)
		}
		if err != nil {
			return nil, err
		}
	}
	child, err := parse(stripTag(fromValue), stripTag(toValue), "Fn::GetAtt", overrider)
	if child == nil {
		return nil, err
	}
	return &keyNode{
		keyValue:   key,
		childNodes: []diffNode{child},
	}, nil
}
// intrinsicFuncName returns the name of the intrinsic function given a node.
// If the node is not an intrinsic function node, it returns an empty string.
func intrinsicFuncName(node *yaml.Node) string {
	if node.Kind != yaml.MappingNode {
		// Short form: the function name is encoded in the node's tag (e.g. "!GetAtt").
		if _, ok := intrinsicFunctionShortNames[node.Tag]; !ok {
			return ""
		}
		return strings.TrimPrefix(node.Tag, "!")
	}
	// Full form: a valid node is a mapping with exactly one entry whose key is "Fn::<name>".
	// Read https://www.efekarakus.com/2020/05/30/deep-dive-go-yaml-cfn.html.
	if len(node.Content) != 2 {
		return ""
	}
	fullName := node.Content[0].Value
	if _, ok := intrinsicFunctionFullNames[fullName]; !ok {
		return ""
	}
	return strings.TrimPrefix(fullName, "Fn::")
}
// stripTag returns a shallow copy of node with its YAML tag omitted, so that a
// tag difference alone (e.g. "!GetAtt" vs. an untagged node) does not surface as a diff.
func stripTag(node *yaml.Node) *yaml.Node {
	untagged := yaml.Node{
		Kind:    node.Kind,
		Style:   node.Style,
		Content: node.Content,
		Value:   node.Value,
	}
	return &untagged
}
// getAttScalarToSeq transforms a scalar node "LogicalID.Attr" into the sequence node [LogicalID, Attr].
// The split happens at the first "." only, since the attribute part may itself contain dots.
func getAttScalarToSeq(scalarNode *yaml.Node) (*yaml.Node, error) {
	parts := strings.SplitN(scalarNode.Value, ".", 2) // SplitN always yields at least one element.
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("["+strings.Join(parts, ",")+"]"), &doc); err != nil {
		return nil, err
	}
	if len(doc.Content) == 0 {
		// NOTE(review): an empty document returns (nil, nil); callers appear to treat a nil node as "no value" — confirm.
		return nil, nil
	}
	// Unmarshal produces a document node; its first child is the sequence itself.
	return doc.Content[0], nil
}
| 263 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.